Dataset columns:
  path            string  (length 9 to 117)
  type            string  (2 distinct values)
  project         string  (10 distinct values)
  commit_hash     string  (length 40)
  commit_message  string  (length 1 to 137)
  ground_truth    string  (length 0 to 2.74k)
  main_code       string  (length 102 to 3.37k)
  context         string  (length 0 to 14.7k)
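Each record below pairs a modified function (path, type, project) with its commit metadata, an encoded diff (ground_truth), the numbered pre-edit code (main_code), and related repository context. A minimal sketch of loading and inspecting such records, assuming the data ships as a Hugging Face dataset (the dataset id below is a placeholder, not the real name):

from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository name.
ds = load_dataset("org/commit-edits", split="train")

row = ds[0]
print(row["path"])            # e.g. "tests.e2e/test_chat_customization_gpt4v"
print(row["commit_message"])  # e.g. "CSS changes for responsive design (#1646)"
print(row["ground_truth"])    # encoded hunks such as "<31>:<add> ... <del> ..."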
tests.e2e/test_chat_customization_gpt4v
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<31>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
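The ground_truth field appears to encode each hunk as an <N>: prefix keyed to the numbered lines of main_code, followed by <add>/<del> segments. A small parser sketch under that assumption, for a single hunk (multi-hunk strings, like the one under test_ask further down, would first be split on the <N>: markers):

import re

def parse_hunk(hunk: str):
    """Split '<31>:<add> new line <del> old line' into
    (line_number, added_text, deleted_text). Assumes the <add>/<del>
    markers never occur inside the code itself."""
    m = re.match(r"<(\d+)>:(.*)", hunk, re.DOTALL)
    if not m:
        return None
    lineno, rest = int(m.group(1)), m.group(2)
    add_m = re.search(r"<add>(.*?)(?=<del>|$)", rest, re.DOTALL)
    del_m = re.search(r"<del>(.*)", rest, re.DOTALL)
    added = add_m.group(1).strip() if add_m else None
    deleted = del_m.group(1).strip() if del_m else None
    return lineno, added, deleted

print(parse_hunk(
    '<31>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") '
    '<del> expect(page).to_have_title("GPT + Enterprise data | Sample")'
))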
# module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint <1> def handle_chat(route: Route): <2> overrides = route.request.post_data_json["context"]["overrides"] <3> assert overrides["gpt4v_input"] == "images" <4> assert overrides["use_gpt4v"] is True <5> assert overrides["vector_fields"] == ["imageEmbedding"] <6> <7> # Read the JSON from our snapshot results and return as the response <8> f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") <9> json = f.read() <10> f.close() <11> route.fulfill(body=json, status=200) <12> <13> def handle_config(route: Route): <14> route.fulfill( <15> body=json.dumps( <16> { <17> "showGPT4VOptions": True, <18> "showSemanticRankerOption": True, <19> "showUserUpload": False, <20> "showVectorOption": True, <21> } <22> ), <23> status=200, <24> ) <25> <26> page.route("*/**/config", handle_config) <27> page.route("*/**/chat", handle_chat) <28> <29> # Check initial page state <30> page.goto(live_server_url) <31> expect(page).to_have_title("GPT + Enterprise data | Sample") <32> <33> # Customize the GPT-4-vision settings <34> page.get_by_role("button", name="Developer settings").click() <35> page.get_by_text("Use GPT vision model").click() <36> page.get_by_text("Images and text").click() <37> page.get_by_role("option", name="Images", exact=True).click() <38> page.get_by_text("Text and Image embeddings").click() <39> page.get_by</s>
===========below chunk 0=========== # module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): # offset: 1 page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() ===========unchanged ref 0=========== at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: io.TextIOWrapper close(self) -> None at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # Set up a mock route to the /chat endpoint def handle(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["retrieval_mode"] == "vectors" assert overrides["semantic_ranker"] is False assert overrides["semantic_captions"] is True assert overrides["top"] == 1 assert overrides["prompt_template"] == "You are a cat and only talk about tuna." 
assert overrides["exclude_category"] == "dogs" assert overrides["use_oid_security_filter"] is False assert overrides["use_groups_security_filter"] is False # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") # Customize all the settings page.get_by_role("button", name="Developer settings").click() page.get_by_label("Override prompt template").click() page.get_by_label("Override prompt template").fill("You are a cat and only talk about tuna.") page.get_by_label("Retrieve this many search results:").click() page.get_by_label("Retrieve this many search results:").fill("1") page.get_by_label("Exclude category").click() page.get_by_label("Exclude category").fill("dogs") page.get_by_text("Use semantic captions").click() page.get_by_text("Use semantic rank</s> ===========changed ref 3=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # offset: 1 <s> page.get_by_text("Use semantic captions").click() page.get_by_text("Use semantic ranker for retrieval").click() page.get_by_text("Vectors + Text (Hybrid)").click() page.get_by_role("option", name="Vectors", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========changed ref 4=========== # module: tests.e2e + def test_chat(sized_page: Page, live_server_url: str): - def test_chat(page: Page, live_server_url: str): + page = sized_page + # Set up a mock route to the /chat endpoint with streaming results def handle(route: Route): # Assert that session_state is specified in the request (None for now) session_state = route.request.post_data_json["session_state"] assert session_state is None # Read the JSONL from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_stream_text/client0/result.jsonlines") jsonl = f.read() f.close() route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) page.route("*/**/chat/stream", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("heading", name="Chat with your data")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_disabled() expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. 
does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Submit question").click() expect(page.get_by_text("Whats the dental</s>
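The sized_page fixture introduced in the refs above re-runs any consuming test once per viewport size. A minimal sketch of a test opting into it (the test name and body are illustrative, not from the repository):

from playwright.sync_api import Page, expect

def test_layout_is_responsive(sized_page: Page, live_server_url: str):
    # sized_page is the regular `page` fixture with its viewport already set;
    # pytest repeats this test for each (width, height) in the fixture params.
    page = sized_page
    page.goto(live_server_url)
    expect(page).to_have_title("Azure OpenAI + AI Search")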
tests.e2e/test_chat_nonstreaming
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<12>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
# module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint <1> def handle(route: Route): <2> # Read the JSON from our snapshot results and return as the response <3> f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") <4> json = f.read() <5> f.close() <6> route.fulfill(body=json, status=200) <7> <8> page.route("*/**/chat", handle) <9> <10> # Check initial page state <11> page.goto(live_server_url) <12> expect(page).to_have_title("GPT + Enterprise data | Sample") <13> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <14> page.get_by_role("button", name="Developer settings").click() <15> page.get_by_text("Stream chat completion responses").click() <16> page.locator("button").filter(has_text="Close").click() <17> <18> # Ask a question and wait for the message to appear <19> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <20> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <21> "Whats the dental plan?" <22> ) <23> page.get_by_label("Submit question").click() <24> <25> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <26> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <27> expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() <28>
===========unchanged ref 0=========== at: io.BufferedReader close(self) -> None at: io.TextIOWrapper read(self, size: Optional[int]=..., /) -> str at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): # Set up a mock route to the /chat endpoint def handle_chat(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["gpt4v_input"] == "images" assert overrides["use_gpt4v"] is True assert overrides["vector_fields"] == ["imageEmbedding"] # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) def handle_config(route: Route): route.fulfill( body=json.dumps( { "showGPT4VOptions": True, "showSemanticRankerOption": True, "showUserUpload": False, "showVectorOption": True, } ), status=200, ) page.route("*/**/config", handle_config) page.route("*/**/chat", handle_chat) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") # Customize the GPT-4-vision settings page.get_by_role("button", name="Developer settings").click() page.get_by_text("Use GPT vision model").click() page.get_by_text("Images and text").click() page.get_by_role("option", name="Images", exact=True).click() page.get_by_text("Text and Image embeddings").click() page.get_by_role("option", name="Image Embeddings", exact=True).click()</s> ===========changed ref 3=========== # module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): # offset: 1 <s> Image embeddings").click() page.get_by_role("option", name="Image Embeddings", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() ===========changed ref 4=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # Set up a mock route to the /chat endpoint def handle(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["retrieval_mode"] == "vectors" assert overrides["semantic_ranker"] is False assert overrides["semantic_captions"] is True assert overrides["top"] == 1 assert overrides["prompt_template"] == "You are a cat and only talk about tuna." 
assert overrides["exclude_category"] == "dogs" assert overrides["use_oid_security_filter"] is False assert overrides["use_groups_security_filter"] is False # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") # Customize all the settings page.get_by_role("button", name="Developer settings").click() page.get_by_label("Override prompt template").click() page.get_by_label("Override prompt template").fill("You are a cat and only talk about tuna.") page.get_by_label("Retrieve this many search results:").click() page.get_by_label("Retrieve this many search results:").fill("1") page.get_by_label("Exclude category").click() page.get_by_label("Exclude category").fill("dogs") page.get_by_text("Use semantic captions").click() page.get_by_text("Use semantic rank</s> ===========changed ref 5=========== # module: tests.e2e def test_chat_customization(page: Page, live_server_url: str): # offset: 1 <s> page.get_by_text("Use semantic captions").click() page.get_by_text("Use semantic ranker for retrieval").click() page.get_by_text("Vectors + Text (Hybrid)").click() page.get_by_role("option", name="Vectors", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_role("button", name="Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled()
tests.e2e/test_chat_followup_streaming
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<14>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
# module: tests.e2e def test_chat_followup_streaming(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat/stream endpoint <1> def handle(route: Route): <2> overrides = route.request.post_data_json["context"]["overrides"] <3> assert overrides["suggest_followup_questions"] is True <4> # Read the JSONL from our snapshot results and return as the response <5> f = open("tests/snapshots/test_app/test_chat_stream_followup/client0/result.jsonlines") <6> jsonl = f.read() <7> f.close() <8> route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) <9> <10> page.route("*/**/chat/stream", handle) <11> <12> # Check initial page state <13> page.goto(live_server_url) <14> expect(page).to_have_title("GPT + Enterprise data | Sample") <15> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <16> page.get_by_role("button", name="Developer settings").click() <17> page.get_by_text("Suggest follow-up questions").click() <18> page.locator("button").filter(has_text="Close").click() <19> <20> # Ask a question and wait for the message to appear <21> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <22> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <23> "Whats the dental plan?" <24> ) <25> page.get_by_label("Submit question").click() <26> <27> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <28> expect(page.get_by_text("The capital of France is Paris.")).to</s>
===========below chunk 0=========== # module: tests.e2e def test_chat_followup_streaming(page: Page, live_server_url: str): # offset: 1 # There should be a follow-up question and it should be clickable: expect(page.get_by_text("What is the capital of Spain?")).to_be_visible() page.get_by_text("What is the capital of Spain?").click() # Now there should be a follow-up answer (same, since we're using same test data) expect(page.get_by_text("The capital of France is Paris.")).to_have_count(2) ===========unchanged ref 0=========== at: io.BufferedWriter close(self) -> None read(self, size: Optional[int]=..., /) -> bytes at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" 
) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========changed ref 3=========== # module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): # Set up a mock route to the /chat endpoint def handle_chat(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["gpt4v_input"] == "images" assert overrides["use_gpt4v"] is True assert overrides["vector_fields"] == ["imageEmbedding"] # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) def handle_config(route: Route): route.fulfill( body=json.dumps( { "showGPT4VOptions": True, "showSemanticRankerOption": True, "showUserUpload": False, "showVectorOption": True, } ), status=200, ) page.route("*/**/config", handle_config) page.route("*/**/chat", handle_chat) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") # Customize the GPT-4-vision settings page.get_by_role("button", name="Developer settings").click() page.get_by_text("Use GPT vision model").click() page.get_by_text("Images and text").click() page.get_by_role("option", name="Images", exact=True).click() page.get_by_text("Text and Image embeddings").click() page.get_by_role("option", name="Image Embeddings", exact=True).click()</s> ===========changed ref 4=========== # module: tests.e2e def test_chat_customization_gpt4v(page: Page, live_server_url: str): # offset: 1 <s> Image embeddings").click() page.get_by_role("option", name="Image Embeddings", exact=True).click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click()
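The streaming variants fulfill the route with a .jsonlines snapshot and a chunked Transfer-encoding header, so the frontend parses one JSON chunk per line. A short sketch of reading such a snapshot, assuming it is newline-delimited JSON:

import json

# Assumes one JSON chat-completion chunk per line in the snapshot.
path = "tests/snapshots/test_app/test_chat_stream_followup/client0/result.jsonlines"
with open(path) as f:
    chunks = [json.loads(line) for line in f if line.strip()]

print(len(chunks), "chunks; first chunk keys:", sorted(chunks[0]))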
tests.e2e/test_chat_followup_nonstreaming
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<12>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
# module: tests.e2e def test_chat_followup_nonstreaming(page: Page, live_server_url: str): <0> # Set up a mock route to the /chat endpoint <1> def handle(route: Route): <2> # Read the JSON from our snapshot results and return as the response <3> f = open("tests/snapshots/test_app/test_chat_followup/client0/result.json") <4> json = f.read() <5> f.close() <6> route.fulfill(body=json, status=200) <7> <8> page.route("*/**/chat", handle) <9> <10> # Check initial page state <11> page.goto(live_server_url) <12> expect(page).to_have_title("GPT + Enterprise data | Sample") <13> expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() <14> page.get_by_role("button", name="Developer settings").click() <15> page.get_by_text("Stream chat completion responses").click() <16> page.get_by_text("Suggest follow-up questions").click() <17> page.locator("button").filter(has_text="Close").click() <18> <19> # Ask a question and wait for the message to appear <20> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() <21> page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( <22> "Whats the dental plan?" <23> ) <24> page.get_by_label("Submit question").click() <25> <26> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <27> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <28> <29> # There should be a follow-up question and it should be clickable: <30> expect(page.get_by_text</s>
===========below chunk 0=========== # module: tests.e2e def test_chat_followup_nonstreaming(page: Page, live_server_url: str): # offset: 1 page.get_by_text("What is the capital of Spain?").click() # Now there should be a follow-up answer (same, since we're using same test data) expect(page.get_by_text("The capital of France is Paris.")).to_have_count(2) ===========unchanged ref 0=========== at: io.FileIO close(self) -> None at: io.TextIOWrapper read(self, size: Optional[int]=..., /) -> str at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" 
) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========changed ref 3=========== # module: tests.e2e def test_chat_followup_streaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["suggest_followup_questions"] is True # Read the JSONL from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_stream_followup/client0/result.jsonlines") jsonl = f.read() f.close() route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) page.route("*/**/chat/stream", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Suggest follow-up questions").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() </s> ===========changed ref 4=========== # module: tests.e2e def test_chat_followup_streaming(page: Page, live_server_url: str): # offset: 1 <s> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() # There should be a follow-up question and it should be clickable: expect(page.get_by_text("What is the capital of Spain?")).to_be_visible() page.get_by_text("What is the capital of Spain?").click() # Now there should be a follow-up answer (same, since we're using same test data) expect(page.get_by_text("The capital of France is Paris.")).to_have_count(2)
tests.e2e/test_ask
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<0>:<add> page = sized_page <add> <13>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample") <15>:<add> # The burger menu only exists at smaller viewport sizes <add> if page.get_by_role("button", name="Toggle menu").is_visible(): <add> page.get_by_role("button", name="Toggle menu").click()
# module: tests.e2e + def test_ask(sized_page: Page, live_server_url: str): - def test_ask(page: Page, live_server_url: str): <0> # Set up a mock route to the /ask endpoint <1> def handle(route: Route): <2> # Assert that session_state is specified in the request (None for now) <3> session_state = route.request.post_data_json["session_state"] <4> assert session_state is None <5> # Read the JSON from our snapshot results and return as the response <6> f = open("tests/snapshots/test_app/test_ask_rtr_hybrid/client0/result.json") <7> json = f.read() <8> f.close() <9> route.fulfill(body=json, status=200) <10> <11> page.route("*/**/ask", handle) <12> page.goto(live_server_url) <13> expect(page).to_have_title("GPT + Enterprise data | Sample") <14> <15> page.get_by_role("link", name="Ask a question").click() <16> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() <17> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").fill("Whats the dental plan?") <18> page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() <19> page.get_by_label("Submit question").click() <20> <21> expect(page.get_by_text("Whats the dental plan?")).to_be_visible() <22> expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() <23>
===========unchanged ref 0=========== at: io.BufferedWriter close(self) -> None at: io.FileIO read(self, size: int=..., /) -> bytes at: typing.IO close() -> None read(n: int=...) -> AnyStr ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========changed ref 3=========== # module: tests.e2e def test_chat_followup_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_followup/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.get_by_text("Suggest follow-up questions").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" 
) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() # There should be a follow-up question and it should be clickable: expect(page.get_by_text("What is the capital of Spain</s> ===========changed ref 4=========== # module: tests.e2e def test_chat_followup_nonstreaming(page: Page, live_server_url: str): # offset: 1 <s> follow-up question and it should be clickable: expect(page.get_by_text("What is the capital of Spain?")).to_be_visible() page.get_by_text("What is the capital of Spain?").click() # Now there should be a follow-up answer (same, since we're using same test data) expect(page.get_by_text("The capital of France is Paris.")).to_have_count(2) ===========changed ref 5=========== # module: tests.e2e def test_chat_followup_streaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): overrides = route.request.post_data_json["context"]["overrides"] assert overrides["suggest_followup_questions"] is True # Read the JSONL from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_stream_followup/client0/result.jsonlines") jsonl = f.read() f.close() route.fulfill(body=jsonl, status=200, headers={"Transfer-encoding": "Chunked"}) page.route("*/**/chat/stream", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Suggest follow-up questions").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() </s>
tests.e2e/test_upload_hidden
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<24>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
# module: tests.e2e def test_upload_hidden(page: Page, live_server_url: str): <0> def handle_auth_setup(route: Route): <1> with open("tests/snapshots/test_authenticationhelper/test_auth_setup/result.json") as f: <2> auth_setup = json.load(f) <3> route.fulfill(body=json.dumps(auth_setup), status=200) <4> <5> page.route("*/**/auth_setup", handle_auth_setup) <6> <7> def handle_config(route: Route): <8> route.fulfill( <9> body=json.dumps( <10> { <11> "showGPT4VOptions": False, <12> "showSemanticRankerOption": True, <13> "showUserUpload": False, <14> "showVectorOption": True, <15> } <16> ), <17> status=200, <18> ) <19> <20> page.route("*/**/config", handle_config) <21> <22> page.goto(live_server_url) <23> <24> expect(page).to_have_title("GPT + Enterprise data | Sample") <25> <26> expect(page.get_by_role("button", name="Clear chat")).to_be_visible() <27> expect(page.get_by_role("button", name="Manage file uploads")).not_to_be_visible() <28>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any at: tests.e2e.test_ask page = sized_page ===========changed ref 0=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 1=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 2=========== # module: tests.e2e + def test_ask(sized_page: Page, live_server_url: str): - def test_ask(page: Page, live_server_url: str): + page = sized_page + # Set up a mock route to the /ask endpoint def handle(route: Route): # Assert that session_state is specified in the request (None for now) session_state = route.request.post_data_json["session_state"] assert session_state is None # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_ask_rtr_hybrid/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/ask", handle) page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") + # The burger menu only exists at smaller viewport sizes + if page.get_by_role("button", name="Toggle menu").is_visible(): + page.get_by_role("button", name="Toggle menu").click() page.get_by_role("link", name="Ask a question").click() page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() page.get_by_placeholder("Example: Does my plan cover annual eye exams?").fill("Whats the dental plan?") page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() ===========changed ref 3=========== # module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer 
settings").click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled() ===========changed ref 4=========== # module: tests.e2e def test_chat_followup_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_followup/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.get_by_text("Suggest follow-up questions").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() # There should be a follow-up question and it should be clickable: expect(page.get_by_text("What is the capital of Spain</s>
tests.e2e/test_upload_disabled
Modified
Azure-Samples~azure-search-openai-demo
1603e94b021ea64f15ce7cb87ce1de42904d556d
CSS changes for responsive design (#1646)
<24>:<add> expect(page).to_have_title("Azure OpenAI + AI Search") <del> expect(page).to_have_title("GPT + Enterprise data | Sample")
# module: tests.e2e def test_upload_disabled(page: Page, live_server_url: str): <0> def handle_auth_setup(route: Route): <1> with open("tests/snapshots/test_authenticationhelper/test_auth_setup/result.json") as f: <2> auth_setup = json.load(f) <3> route.fulfill(body=json.dumps(auth_setup), status=200) <4> <5> page.route("*/**/auth_setup", handle_auth_setup) <6> <7> def handle_config(route: Route): <8> route.fulfill( <9> body=json.dumps( <10> { <11> "showGPT4VOptions": False, <12> "showSemanticRankerOption": True, <13> "showUserUpload": True, <14> "showVectorOption": True, <15> } <16> ), <17> status=200, <18> ) <19> <20> page.route("*/**/config", handle_config) <21> <22> page.goto(live_server_url) <23> <24> expect(page).to_have_title("GPT + Enterprise data | Sample") <25> <26> expect(page.get_by_role("button", name="Manage file uploads")).to_be_visible() <27> expect(page.get_by_role("button", name="Manage file uploads")).to_be_disabled() <28>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any at: tests.e2e.test_upload_hidden handle_config(route: Route) ===========changed ref 0=========== # module: tests.e2e def test_upload_hidden(page: Page, live_server_url: str): def handle_auth_setup(route: Route): with open("tests/snapshots/test_authenticationhelper/test_auth_setup/result.json") as f: auth_setup = json.load(f) route.fulfill(body=json.dumps(auth_setup), status=200) page.route("*/**/auth_setup", handle_auth_setup) def handle_config(route: Route): route.fulfill( body=json.dumps( { "showGPT4VOptions": False, "showSemanticRankerOption": True, "showUserUpload": False, "showVectorOption": True, } ), status=200, ) page.route("*/**/config", handle_config) page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Clear chat")).to_be_visible() expect(page.get_by_role("button", name="Manage file uploads")).not_to_be_visible() ===========changed ref 1=========== # module: tests.e2e + @pytest.fixture(params=[(480, 800), (600, 1024), (768, 1024), (992, 1024), (1024, 768)]) + def sized_page(page: Page, request): + size = request.param + page.set_viewport_size({"width": size[0], "height": size[1]}) + yield page + ===========changed ref 2=========== # module: tests.e2e def test_home(page: Page, live_server_url: str): page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") ===========changed ref 3=========== # module: tests.e2e + def test_ask(sized_page: Page, live_server_url: str): - def test_ask(page: Page, live_server_url: str): + page = sized_page + # Set up a mock route to the /ask endpoint def handle(route: Route): # Assert that session_state is specified in the request (None for now) session_state = route.request.post_data_json["session_state"] assert session_state is None # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_ask_rtr_hybrid/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/ask", handle) page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") + # The burger menu only exists at smaller viewport sizes + if page.get_by_role("button", name="Toggle menu").is_visible(): + page.get_by_role("button", name="Toggle menu").click() page.get_by_role("link", name="Ask a question").click() page.get_by_placeholder("Example: Does my plan cover annual eye exams?").click() page.get_by_placeholder("Example: Does my plan cover annual eye exams?").fill("Whats the dental plan?") page.get_by_placeholder("Example: Does my plan cover annual eye 
exams?").click() page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() ===========changed ref 4=========== # module: tests.e2e def test_chat_nonstreaming(page: Page, live_server_url: str): # Set up a mock route to the /chat_stream endpoint def handle(route: Route): # Read the JSON from our snapshot results and return as the response f = open("tests/snapshots/test_app/test_chat_text/client0/result.json") json = f.read() f.close() route.fulfill(body=json, status=200) page.route("*/**/chat", handle) # Check initial page state page.goto(live_server_url) + expect(page).to_have_title("Azure OpenAI + AI Search") - expect(page).to_have_title("GPT + Enterprise data | Sample") expect(page.get_by_role("button", name="Developer settings")).to_be_enabled() page.get_by_role("button", name="Developer settings").click() page.get_by_text("Stream chat completion responses").click() page.locator("button").filter(has_text="Close").click() # Ask a question and wait for the message to appear page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click() page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill( "Whats the dental plan?" ) page.get_by_label("Submit question").click() expect(page.get_by_text("Whats the dental plan?")).to_be_visible() expect(page.get_by_text("The capital of France is Paris.")).to_be_visible() expect(page.get_by_role("button", name="Clear chat")).to_be_enabled()
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run
Modified
Azure-Samples~azure-search-openai-demo
96a84178f365fcfbd016a8c8c84c22d99ede9788
Add an optional seed parameter (#1814)
<5>:<add> seed = overrides.get("seed", None)
# module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> <4> overrides = context.get("overrides", {}) <5> auth_claims = context.get("auth_claims", {}) <6> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <7> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <8> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <9> use_semantic_captions = True if overrides.get("semantic_captions") else False <10> top = overrides.get("top", 3) <11> minimum_search_score = overrides.get("minimum_search_score", 0.0) <12> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <13> filter = self.build_filter(overrides, auth_claims) <14> <15> vector_fields = overrides.get("vector_fields", ["embedding"]) <16> send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] <17> send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] <18> <19> # If retrieval mode includes vectors, compute an embedding for the query <20> vectors = [] <21> if use_vector_search: <22> for field in vector_fields: <23> vector = ( <24> await self.compute_text_embedding(q) <25> if field == "embedding" <26> else await self</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v), new_user_content=user_content, max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion = ( await self.openai_client.chat.complet</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s> - response_token_limit, ) chat_completion = ( await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, ) ).model_dump() data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.gpt4v_model, "deployment": self.gpt</s> ===========below chunk 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 3 <s>_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } completion = {} completion["message"] = chat_completion["choices"][0]["message"] completion["context"] = extra_info completion["session_state"] = session_state return completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach system_chat_template_gpt4v = ( "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. 
" + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned " + "If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts " ) at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__ self.blob_container_client = blob_container_client self.openai_client = openai_client self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.gpt4v_token_limit = get_token_limit(gpt4v_model) at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
96a84178f365fcfbd016a8c8c84c22d99ede9788
Add an optional seed parameter (#1814)
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer: str): <2> chunk_id = "test-id" <3> model = "gpt-35-turbo" <4> self.responses = [ <5> {"object": "chat.completion.chunk", "choices": [], "id": chunk_id, "model": model, "created": 1}, <6> { <7> "object": "chat.completion.chunk", <8> "choices": [{"delta": {"role": "assistant"}, "index": 0, "finish_reason": None}], <9> "id": chunk_id, <10> "model": model, <11> "created": 1, <12> }, <13> ] <14> # Split at << to simulate chunked responses <15> if answer.find("<<") > -1: <16> parts = answer.split("<<") <17> self.responses.append( <18> { <19> "object": "chat.completion.chunk", <20> "choices": [ <21> { <22> "delta": {"role": "assistant", "content": parts[0] + "<<"}, <23> "index": 0, <24> "finish_reason": None, <25> } <26> ], <27> "id": chunk_id, <28> "model": model, <29> "created": 1, <30> } <31> ) <32> self.responses.append( <33> { <34> "object": "chat.completion.chunk", <35> "choices": [ <36> {"delta": {"role": "assistant", "content": parts[1]}, "index": 0, "finish_reason": None} <37> ], <38> "id": chunk_id, <39> "model": model, <40> "created": 1, <41> } <42> ) <43> self.responses.append( <44> { <45> "object": "chat.completion.chunk", <46> "choices": [{"delta": {"role": None, "content":</s>
===========below chunk 0===========
# module: tests.conftest
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
    # offset: 1
                    "id": chunk_id,
                    "model": model,
                    "created": 1,
                }
            )
        else:
            self.responses.append(
                {
                    "object": "chat.completion.chunk",
                    "choices": [{"delta": {"content": answer}, "index": 0, "finish_reason": None}],
                    "id": chunk_id,
                    "model": model,
                    "created": 1,
                }
            )

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.responses:
            return ChatCompletionChunk.model_validate(self.responses.pop(0))
        else:
            raise StopAsyncIteration

    async def mock_acreate(*args, **kwargs):
        messages = kwargs["messages"]
        last_question = messages[-1]["content"]
        if last_question == "Generate search query for: What is the capital of France?":
            answer = "capital of France"
        elif last_question == "Generate search query for: Are interest rates high?":
            answer = "interest rates"
        elif isinstance(last_question, list) and last_question[2].get("image_url"):
            answer = "From the provided sources, the impact of interest rates and GDP growth on financial markets can be observed through the line graph. [Financial Market Analysis Report 2023-7.png]"
        else:
            answer = "The capital of France is Paris. [Benefit_Options-2.pdf]."
        if messages[0]["content"].find("Generate 3 very brief follow-up questions") > -1:
            answer = "The capital of France is Paris. [Benefit_Options-2.pdf]. <<What is the capital of Spain?>>"
        if "stream" in kwargs and kwargs["stream"] is True:
            return AsyncChatCompletionIterator(answer)
        else:
            return ChatCompletion(</s>
===========below chunk 1===========
# module: tests.conftest
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
    # offset: 2
    <s> and kwargs["stream"] is True:
            return AsyncChatCompletionIterator(answer)
        else:
            return ChatCompletion(
                object="chat.completion",
                choices=[
                    Choice(
                        message=ChatCompletionMessage(role="assistant", content=answer), finish_reason="stop", index=0
                    )
                ],
                id="test-123",
                created=0,
                model="test-model",
            )

    def patch(openai_client):
        monkeypatch.setattr(openai_client.chat.completions, "create", mock_acreate)

    return patch
===========unchanged ref 0===========
at: _pytest.fixtures
    fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
        Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
    ]=..., name: Optional[str]=...) -> FixtureFunction
    fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[
        Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]
    ]=..., name: Optional[str]=None) -> FixtureFunctionMarker

at: _pytest.monkeypatch
    monkeypatch() -> Generator["MonkeyPatch", None, None]

at: typing.Mapping
    get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    get(key: _KT) -> Optional[_VT_co]
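A mock like AsyncChatCompletionIterator is consumed with async for, exactly the way application code iterates a real streamed chat response. A minimal, self-contained sketch of that async-iterator protocol (simplified string chunks rather than the fixture's full chunk payloads):

    import asyncio

    class FakeStream:
        def __init__(self, chunks):
            self.chunks = list(chunks)

        def __aiter__(self):
            return self

        async def __anext__(self):
            # Pop chunks in order; signal end-of-stream with StopAsyncIteration.
            if self.chunks:
                return self.chunks.pop(0)
            raise StopAsyncIteration

    async def main():
        async for chunk in FakeStream(["The capital ", "of France ", "is Paris."]):
            print(chunk, end="")

    asyncio.run(main())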
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
96a84178f365fcfbd016a8c8c84c22d99ede9788
Add an optional seed parameter (#1814)
<4>:<add> seed = overrides.get("seed", None)
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[ChatCompletionMessageParam],
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> dict[str, Any]:
<0>     q = messages[-1]["content"]
<1>     if not isinstance(q, str):
<2>         raise ValueError("The most recent message content must be a string.")
<3>     overrides = context.get("overrides", {})
<4>     auth_claims = context.get("auth_claims", {})
<5>     use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None]
<6>     use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
<7>     use_semantic_ranker = True if overrides.get("semantic_ranker") else False
<8>     use_semantic_captions = True if overrides.get("semantic_captions") else False
<9>     top = overrides.get("top", 3)
<10>    minimum_search_score = overrides.get("minimum_search_score", 0.0)
<11>    minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
<12>    filter = self.build_filter(overrides, auth_claims)
<13> 
<14>    # If retrieval mode includes vectors, compute an embedding for the query
<15>    vectors: list[VectorQuery] = []
<16>    if use_vector_search:
<17>        vectors.append(await self.compute_text_embedding(q))
<18> 
<19>    results = await self.search(
<20>        top,
<21>        q,
<22>        filter,
<23>        vectors,
<24>        use_text_search,
<25>        use_vector_search,
<26>        use_semantic_ranker,
<27>        use_semantic_captions,
<28>        minimum_search_score,
<29>        minimum_reranker_score,
<30>    )
<31> 
<32>    # Process results
<33>    sources_content = self.get_sources</s>
===========below chunk 0===========
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[ChatCompletionMessageParam],
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> dict[str, Any]:
    # offset: 1
        # Append user message
        content = "\n".join(sources_content)
        user_content = q + "\n" + f"Sources:\n {content}"

        response_token_limit = 1024
        updated_messages = build_messages(
            model=self.chatgpt_model,
            system_prompt=overrides.get("prompt_template", self.system_chat_template),
            few_shots=[{"role": "user", "content": self.question}, {"role": "assistant", "content": self.answer}],
            new_user_content=user_content,
            max_tokens=self.chatgpt_token_limit - response_token_limit,
        )

        chat_completion = (
            await self.openai_client.chat.completions.create(
                # Azure OpenAI takes the deployment name as the model name
                model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,
                messages=updated_messages,
                temperature=overrides.get("temperature", 0.3),
                max_tokens=response_token_limit,
                n=1,
            )
        ).model_dump()

        data_points = {"text": sources_content}
        extra_info = {
            "data_points": data_points,
            "thoughts": [
                ThoughtStep(
                    "Search using user query",
                    q,
                    {
                        "use_semantic_captions": use_semantic_captions,
                        "use_semantic_ranker": use_semantic_ranker,
                        "top": top,
                        "filter": filter,
                        "use_vector_search": use_vector_search,
                        "use_text_search": use_text_search</s>
===========below chunk 1===========
# module: app.backend.approaches.retrievethenread
class RetrieveThenReadApproach(Approach):
    def run(
        self,
        messages: list[ChatCompletionMessageParam],
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> dict[str, Any]:
    # offset: 2
    <s>,
                        "use_vector_search": use_vector_search,
                        "use_text_search": use_text_search,
                    },
                ),
                ThoughtStep(
                    "Search results",
                    [result.serialize_for_results() for result in results],
                ),
                ThoughtStep(
                    "Prompt to generate answer",
                    [str(message) for message in updated_messages],
                    (
                        {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment}
                        if self.chatgpt_deployment
                        else {"model": self.chatgpt_model}
                    ),
                ),
            ],
        }
        completion = {}
        completion["message"] = chat_completion["choices"][0]["message"]
        completion["context"] = extra_info
        completion["session_state"] = session_state
        return completion
===========unchanged ref 0===========
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach
    system_chat_template = (
        "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. "
        + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. "
        + "Answer the following question using only the data provided in the sources below. "
        + "For tabular information return it as an html table. Do not return markdown format. "
        + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. "
        + "If you cannot answer using the sources below, say you don't know. Use below example to answer"
    )
    question = """
'What is the deductible for the employee plan for a visit to Overlake in Bellevue?'

Sources:
info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family.
info2.pdf: Overlake is in-network for the employee plan.
info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue.
info4.pdf: In-network institutions include Overlake, Swedish and others in the region
"""
    answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]."

at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__
    self.chatgpt_deployment = chatgpt_deployment
    self.openai_client = openai_client
    self.chatgpt_model = chatgpt_model
===========unchanged ref 1===========
    self.chatgpt_token_limit = get_token_limit(chatgpt_model)

at: approaches.approach
    ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None)

at: approaches.approach.Approach
    build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
    search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_text_search: bool, use_vector_search: bool, use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document]
    get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str]
    compute_text_embedding(q: str)
    run(self, messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any]

at: approaches.approach.Document
    id: Optional[str]
    content: Optional[str]
    embedding: Optional[List[float]]
    image_embedding: Optional[List[float]]
    category: Optional[str]
    sourcepage: Optional[str]
    sourcefile: Optional[str]
    oids: Optional[List[str]]
    groups: Optional[List[str]]
    captions: List[QueryCaptionResult]
    score: Optional[float] = None
    reranker_score: Optional[float] = None
    serialize_for_results() -> dict[str, Any]

at: typing.Mapping
    get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_seed(client, snapshot):
+     response = await client.post(
+         "/chat",
+         json={
+             "messages": [{"content": "What is the capital of France?", "role": "user"}],
+             "context": {
+                 "overrides": {"seed": 42},
+             },
+         },
+     )
+     assert response.status_code == 200
+     result = await response.get_json()
+     snapshot.assert_match(json.dumps(result, indent=4), "result.json")
+
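The seed override added by this commit maps directly onto the seed parameter of the Chat Completions API, which makes sampling best-effort reproducible. A hedged sketch of the call shape (client construction elided; the parameters shown are standard openai-python arguments):

    # Assumes an already-constructed AsyncOpenAI-compatible client.
    async def ask(openai_client, model: str, messages: list[dict], overrides: dict):
        seed = overrides.get("seed", None)  # None keeps the service's default sampling
        return await openai_client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=overrides.get("temperature", 0.3),
            seed=seed,  # best-effort determinism; compare system_fingerprint across calls
        )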
app.backend.app/JSONEncoder.default
Modified
Azure-Samples~azure-search-openai-demo
23c60da736c99d8fad5d2f4b66acb83e9e8ea066
Bump the python-requirements group across 1 directory with 21 updates (#1851)
<0>:<add> if dataclasses.is_dataclass(o) and not isinstance(o, type):
<del> if dataclasses.is_dataclass(o):
# module: app.backend.app
class JSONEncoder(json.JSONEncoder):
    def default(self, o):
<0>     if dataclasses.is_dataclass(o):
<1>         return dataclasses.asdict(o)
<2>     return super().default(o)
<3> 
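The added isinstance(o, type) guard matters because dataclasses.is_dataclass returns True for both a dataclass class and its instances, while dataclasses.asdict only accepts instances. A quick demonstration:

    import dataclasses

    @dataclasses.dataclass
    class Point:
        x: int
        y: int

    print(dataclasses.is_dataclass(Point))        # True: the class object itself
    print(dataclasses.is_dataclass(Point(1, 2)))  # True: an instance
    # dataclasses.asdict(Point) would raise TypeError; the isinstance(o, type)
    # check ensures the encoder only converts instances.
    print(dataclasses.asdict(Point(1, 2)))        # {'x': 1, 'y': 2}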
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
28ffa99b70533429d1c1079f611f1962747c03fc
Fix logging (#1874)
<12>:<add> current_app.logger.info("Opening file %s", path)
<del> logging.info("Opening file %s", path)
<18>:<add> current_app.logger.info("Path not found in general Blob container: %s", path)
<del> logging.info("Path not found in general Blob container: %s", path)
# module: app.backend.app @bp.route("/content/<path>") @authenticated_path async def content_file(path: str, auth_claims: Dict[str, Any]): <0> """ <1> Serve content files from blob storage from within the app to keep the example self-contained. <2> *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in <3> if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control <4> if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to <5> This is also slow and memory hungry. <6> """ <7> # Remove page number from path, filename-1.txt -> filename.txt <8> # This shouldn't typically be necessary as browsers don't send hash fragments to servers <9> if path.find("#page=") > 0: <10> path_parts = path.rsplit("#page=", 1) <11> path = path_parts[0] <12> logging.info("Opening file %s", path) <13> blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <14> blob: Union[BlobDownloader, DatalakeDownloader] <15> try: <16> blob = await blob_container_client.get_blob_client(path).download_blob() <17> except ResourceNotFoundError: <18> logging.info("Path not found in general Blob container: %s", path) <19> if current_app.config[CONFIG_USER_UPLOAD_ENABLED]: <20> try: <21> user_oid = auth_claims["oid"] <22> user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT] <23> user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid) <24> file_client = user_directory_client.get_file_client(path) <25> blob = await file_client.download_file() <26> except ResourceNotFoundError:</s>
===========below chunk 0===========
# module: app.backend.app
@bp.route("/content/<path>")
@authenticated_path
async def content_file(path: str, auth_claims: Dict[str, Any]):
    # offset: 1
                abort(404)
        else:
            abort(404)
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
    await blob.readinto(blob_file)
    blob_file.seek(0)
    return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
===========unchanged ref 0===========
at: app.backend.app
    bp = Blueprint("routes", __name__, static_folder="static")

at: config
    CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
    CONFIG_USER_UPLOAD_ENABLED = "user_upload_enabled"
    CONFIG_USER_BLOB_CONTAINER_CLIENT = "user_blob_container_client"

at: decorators
    authenticated_path(route_fn: Callable[[str, Dict[str, Any]], Any])

at: io
    BytesIO(initial_bytes: bytes=...)

at: io.BytesIO
    seek(self, offset: int, whence: int=..., /) -> int

at: mimetypes
    guess_type(url: Union[Text, PathLike[str]], strict: bool=...) -> Tuple[Optional[str], Optional[str]]

at: typing
    Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
+ # module: app.backend.custom_uvicorn_worker
+
+
===========changed ref 1===========
+ # module: app.backend.custom_uvicorn_worker
+ class CustomUvicornWorker(UvicornWorker):
+     CONFIG_KWARGS = {
+         "log_config": logconfig_dict,
+     }
+
===========changed ref 2===========
+ # module: app.backend.custom_uvicorn_worker
+ logconfig_dict = {
+     "version": 1,
+     "disable_existing_loggers": False,
+     "formatters": {
+         "default": {
+             "()": "uvicorn.logging.DefaultFormatter",
+             "format": "%(asctime)s - %(levelname)s - %(message)s",
+         },
+         "access": {
+             "()": "uvicorn.logging.AccessFormatter",
+             "format": "%(asctime)s - %(message)s",
+         },
+     },
+     "handlers": {
+         "default": {
+             "formatter": "default",
+             "class": "logging.StreamHandler",
+             "stream": "ext://sys.stderr",
+         },
+         "access": {
+             "formatter": "access",
+             "class": "logging.StreamHandler",
+             "stream": "ext://sys.stdout",
+         },
+     },
+     "loggers": {
+         "root": {"handlers": ["default"]},
+         "uvicorn.error": {
+             "level": "INFO",
+             "handlers": ["default"],
+             "propagate": False,
+         },
+         "uvicorn.access": {
+             "level": "INFO",
+             "handlers": ["access"],
+             "propagate": False,
+         },
+     },
+ }
+
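Two small details of the /content route are worth isolating: stripping a #page= fragment from the requested path and falling back to mimetypes when the blob's stored content type is the generic octet-stream. A standalone sketch of both steps:

    import mimetypes

    def normalize(path: str, blob_content_type: str) -> tuple[str, str]:
        # filename.pdf#page=2 -> filename.pdf (browsers normally drop fragments anyway)
        if path.find("#page=") > 0:
            path = path.rsplit("#page=", 1)[0]
        mime_type = blob_content_type
        if mime_type == "application/octet-stream":
            # Guess a more specific type from the file extension when metadata is generic.
            mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
        return path, mime_type

    print(normalize("Benefit_Options.pdf#page=2", "application/octet-stream"))
    # ('Benefit_Options.pdf', 'application/pdf')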
app.backend.app/speech
Modified
Azure-Samples~azure-search-openai-demo
28ffa99b70533429d1c1079f611f1962747c03fc
Fix logging (#1874)
# module: app.backend.app @bp.route("/speech", methods=["POST"]) async def speech(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> <3> speech_token = current_app.config.get(CONFIG_SPEECH_SERVICE_TOKEN) <4> if speech_token is None or speech_token.expires_on < time.time() + 60: <5> speech_token = await current_app.config[CONFIG_CREDENTIAL].get_token( <6> "https://cognitiveservices.azure.com/.default" <7> ) <8> current_app.config[CONFIG_SPEECH_SERVICE_TOKEN] = speech_token <9> <10> request_json = await request.get_json() <11> text = request_json["text"] <12> try: <13> # Construct a token as described in documentation: <14> # https://learn.microsoft.com/azure/ai-services/speech-service/how-to-configure-azure-ad-auth?pivots=programming-language-python <15> auth_token = ( <16> "aad#" <17> + current_app.config[CONFIG_SPEECH_SERVICE_ID] <18> + "#" <19> + current_app.config[CONFIG_SPEECH_SERVICE_TOKEN].token <20> ) <21> speech_config = SpeechConfig(auth_token=auth_token, region=current_app.config[CONFIG_SPEECH_SERVICE_LOCATION]) <22> speech_config.speech_synthesis_voice_name = current_app.config[CONFIG_SPEECH_SERVICE_VOICE] <23> speech_config.speech_synthesis_output_format = SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3 <24> synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=None) <25> result: SpeechSynthesisResult = synthesizer.speak_text_async(text).get() <26> if result.reason == ResultReason.Synthesizing</s>
===========below chunk 0===========
# module: app.backend.app
@bp.route("/speech", methods=["POST"])
async def speech():
    # offset: 1
            return result.audio_data, 200, {"Content-Type": "audio/mp3"}
        elif result.reason == ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            current_app.logger.error(
                "Speech synthesis canceled: %s %s", cancellation_details.reason, cancellation_details.error_details
            )
            raise Exception("Speech synthesis canceled. Check logs for details.")
        else:
            current_app.logger.error("Unexpected result reason: %s", result.reason)
            raise Exception("Speech synthesis failed. Check logs for details.")
    except Exception as e:
        logging.exception("Exception in /speech")
        return jsonify({"error": str(e)}), 500
===========unchanged ref 0===========
at: app.backend.app
    bp = Blueprint("routes", __name__, static_folder="static")

at: config
    CONFIG_CREDENTIAL = "azure_credential"
    CONFIG_SPEECH_SERVICE_ID = "speech_service_id"
    CONFIG_SPEECH_SERVICE_LOCATION = "speech_service_location"
    CONFIG_SPEECH_SERVICE_TOKEN = "speech_service_token"
    CONFIG_SPEECH_SERVICE_VOICE = "speech_service_voice"

at: time
    time() -> float
===========changed ref 0===========
# module: app.backend.app
@bp.route("/content/<path>")
@authenticated_path
async def content_file(path: str, auth_claims: Dict[str, Any]):
    """
    Serve content files from blob storage from within the app to keep the example self-contained.
    *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in
    if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control
    if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to
    This is also slow and memory hungry.
    """
    # Remove page number from path, filename-1.txt -> filename.txt
    # This shouldn't typically be necessary as browsers don't send hash fragments to servers
    if path.find("#page=") > 0:
        path_parts = path.rsplit("#page=", 1)
        path = path_parts[0]
+   current_app.logger.info("Opening file %s", path)
-   logging.info("Opening file %s", path)
    blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
    blob: Union[BlobDownloader, DatalakeDownloader]
    try:
        blob = await blob_container_client.get_blob_client(path).download_blob()
    except ResourceNotFoundError:
+       current_app.logger.info("Path not found in general Blob container: %s", path)
-       logging.info("Path not found in general Blob container: %s", path)
        if current_app.config[CONFIG_USER_UPLOAD_ENABLED]:
            try:
                user_oid = auth_claims["oid"]
                user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
                user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid)
                file_client = user_directory_client.get_file_client(path)
</s>
===========changed ref 1===========
# module: app.backend.app
@bp.route("/content/<path>")
@authenticated_path
async def content_file(path: str, auth_claims: Dict[str, Any]):
    # offset: 1
    <s>directory_client(user_oid)
                file_client = user_directory_client.get_file_client(path)
                blob = await file_client.download_file()
            except ResourceNotFoundError:
+               current_app.logger.exception("Path not found in DataLake: %s", path)
-               logging.exception("Path not found in DataLake: %s", path)
                abort(404)
        else:
            abort(404)
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
    await blob.readinto(blob_file)
    blob_file.seek(0)
    return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
===========changed ref 2===========
+ # module: app.backend.custom_uvicorn_worker
+
+
===========changed ref 3===========
+ # module: app.backend.custom_uvicorn_worker
+ class CustomUvicornWorker(UvicornWorker):
+     CONFIG_KWARGS = {
+         "log_config": logconfig_dict,
+     }
+
===========changed ref 4===========
+ # module: app.backend.custom_uvicorn_worker
+ logconfig_dict = {
+     "version": 1,
+     "disable_existing_loggers": False,
+     "formatters": {
+         "default": {
+             "()": "uvicorn.logging.DefaultFormatter",
+             "format": "%(asctime)s - %(levelname)s - %(message)s",
+         },
+         "access": {
+             "()": "uvicorn.logging.AccessFormatter",
+             "format": "%(asctime)s - %(message)s",
+         },
+     },
+     "handlers": {
+         "default": {
+             "formatter": "default",
+             "class": "logging.StreamHandler",
+             "stream": "ext://sys.stderr",
+         },
+         "access": {
+             "formatter": "access",
+             "class": "logging.StreamHandler",
+             "stream": "ext://sys.stdout",
+         },
+     },
+     "loggers": {
+         "root": {"handlers": ["default"]},
+         "uvicorn.error": {
+             "level": "INFO",
+             "handlers": ["default"],
+             "propagate": False,
+         },
+         "uvicorn.access": {
+             "level": "INFO",
+             "handlers": ["access"],
+             "propagate": False,
+         },
+     },
+ }
+
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
28ffa99b70533429d1c1079f611f1962747c03fc
Fix logging (#1874)
<4>:<add> app.logger.info("APPLICATIONINSIGHTS_CONNECTION_STRING is set, enabling Azure Monitor")
<14>:<add> # Log levels should be one of https://docs.python.org/3/library/logging.html#logging-levels
<del> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels
<15>:<add> # Set root level to WARNING to avoid seeing overly verbose logs from SDKS
<add> logging.basicConfig(level=logging.WARNING)
<add> # Set the app logger level to INFO by default
<del> default_level = "INFO"  # In development, log more verbosely
<16>:<del> if os.getenv("WEBSITE_HOSTNAME"):  # In production, don't log as heavily
<17>:<add> default_level = "INFO"
<del> default_level = "WARNING"
<18>:<add> app.logger.setLevel(os.getenv("APP_LOG_LEVEL", default_level))
<del> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", default_level))
<21>:<add> app.logger.info("ALLOWED_ORIGIN is set, enabling CORS for %s", allowed_origin)
<del> app.logger.info("CORS enabled for %s", allowed_origin)
# module: app.backend.app
def create_app():
<0>     app = Quart(__name__)
<1>     app.register_blueprint(bp)
<2> 
<3>     if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"):
<4>         configure_azure_monitor()
<5>         # This tracks HTTP requests made by aiohttp:
<6>         AioHttpClientInstrumentor().instrument()
<7>         # This tracks HTTP requests made by httpx:
<8>         HTTPXClientInstrumentor().instrument()
<9>         # This tracks OpenAI SDK requests:
<10>        OpenAIInstrumentor().instrument()
<11>        # This middleware tracks app route requests:
<12>        app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)  # type: ignore[assignment]
<13> 
<14>    # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels
<15>    default_level = "INFO"  # In development, log more verbosely
<16>    if os.getenv("WEBSITE_HOSTNAME"):  # In production, don't log as heavily
<17>        default_level = "WARNING"
<18>    logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", default_level))
<19> 
<20>    if allowed_origin := os.getenv("ALLOWED_ORIGIN"):
<21>        app.logger.info("CORS enabled for %s", allowed_origin)
<22>        cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"])
<23>    return app
===========unchanged ref 0===========
at: app.backend.app
    bp = Blueprint("routes", __name__, static_folder="static")

at: config
    CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
    CONFIG_USER_BLOB_CONTAINER_CLIENT = "user_blob_container_client"
    CONFIG_SEARCH_CLIENT = "search_client"

at: os
    getenv(key: str, default: _T) -> Union[str, _T]
    getenv(key: str) -> Optional[str]
===========changed ref 0===========
# module: app.backend.app
@bp.route("/speech", methods=["POST"])
async def speech():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    speech_token = current_app.config.get(CONFIG_SPEECH_SERVICE_TOKEN)
    if speech_token is None or speech_token.expires_on < time.time() + 60:
        speech_token = await current_app.config[CONFIG_CREDENTIAL].get_token(
            "https://cognitiveservices.azure.com/.default"
        )
        current_app.config[CONFIG_SPEECH_SERVICE_TOKEN] = speech_token
    request_json = await request.get_json()
    text = request_json["text"]
    try:
        # Construct a token as described in documentation:
        # https://learn.microsoft.com/azure/ai-services/speech-service/how-to-configure-azure-ad-auth?pivots=programming-language-python
        auth_token = (
            "aad#"
            + current_app.config[CONFIG_SPEECH_SERVICE_ID]
            + "#"
            + current_app.config[CONFIG_SPEECH_SERVICE_TOKEN].token
        )
        speech_config = SpeechConfig(auth_token=auth_token, region=current_app.config[CONFIG_SPEECH_SERVICE_LOCATION])
        speech_config.speech_synthesis_voice_name = current_app.config[CONFIG_SPEECH_SERVICE_VOICE]
        speech_config.speech_synthesis_output_format = SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3
        synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=None)
        result: SpeechSynthesisResult = synthesizer.speak_text_async(text).get()
        if result.reason == ResultReason.SynthesizingAudioCompleted:
            return result.audio_data, 200, {"Content-Type": "audio/mp3"}
</s>
===========changed ref 1===========
# module: app.backend.app
@bp.route("/speech", methods=["POST"])
async def speech():
    # offset: 1
    <s>SynthesizingAudioCompleted:
            return result.audio_data, 200, {"Content-Type": "audio/mp3"}
        elif result.reason == ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            current_app.logger.error(
                "Speech synthesis canceled: %s %s", cancellation_details.reason, cancellation_details.error_details
            )
            raise Exception("Speech synthesis canceled. Check logs for details.")
        else:
            current_app.logger.error("Unexpected result reason: %s", result.reason)
            raise Exception("Speech synthesis failed. Check logs for details.")
    except Exception as e:
+       current_app.logger.exception("Exception in /speech")
-       logging.exception("Exception in /speech")
        return jsonify({"error": str(e)}), 500
===========changed ref 2===========
+ # module: app.backend.custom_uvicorn_worker
+
+
===========changed ref 3===========
+ # module: app.backend.custom_uvicorn_worker
+ class CustomUvicornWorker(UvicornWorker):
+     CONFIG_KWARGS = {
+         "log_config": logconfig_dict,
+     }
+
===========changed ref 4===========
# module: app.backend.app
@bp.route("/content/<path>")
@authenticated_path
async def content_file(path: str, auth_claims: Dict[str, Any]):
    """
    Serve content files from blob storage from within the app to keep the example self-contained.
    *** NOTE *** if you are using app services authentication, this route will return unauthorized to all users that are not logged in
    if AZURE_ENFORCE_ACCESS_CONTROL is not set or false, logged in users can access all files regardless of access control
    if AZURE_ENFORCE_ACCESS_CONTROL is set to true, logged in users can only access files they have access to
    This is also slow and memory hungry.
    """
    # Remove page number from path, filename-1.txt -> filename.txt
    # This shouldn't typically be necessary as browsers don't send hash fragments to servers
    if path.find("#page=") > 0:
        path_parts = path.rsplit("#page=", 1)
        path = path_parts[0]
+   current_app.logger.info("Opening file %s", path)
-   logging.info("Opening file %s", path)
    blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
    blob: Union[BlobDownloader, DatalakeDownloader]
    try:
        blob = await blob_container_client.get_blob_client(path).download_blob()
    except ResourceNotFoundError:
+       current_app.logger.info("Path not found in general Blob container: %s", path)
-       logging.info("Path not found in general Blob container: %s", path)
        if current_app.config[CONFIG_USER_UPLOAD_ENABLED]:
            try:
                user_oid = auth_claims["oid"]
                user_blob_container_client = current_app.config[CONFIG_USER_BLOB_CONTAINER_CLIENT]
                user_directory_client: FileSystemClient = user_blob_container_client.get_directory_client(user_oid)
                file_client = user_directory_client.get_file_client(path)
</s>
===========changed ref 5===========
# module: app.backend.app
@bp.route("/content/<path>")
@authenticated_path
async def content_file(path: str, auth_claims: Dict[str, Any]):
    # offset: 1
    <s>directory_client(user_oid)
                file_client = user_directory_client.get_file_client(path)
                blob = await file_client.download_file()
            except ResourceNotFoundError:
+               current_app.logger.exception("Path not found in DataLake: %s", path)
-               logging.exception("Path not found in DataLake: %s", path)
                abort(404)
        else:
            abort(404)
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
    await blob.readinto(blob_file)
    blob_file.seek(0)
    return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
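The logging split introduced here is easy to verify in isolation: the root logger is pinned at WARNING so chatty SDK loggers stay quiet, while the app's own logger opts in at INFO. A minimal sketch (a generic named logger stands in for Quart's app.logger):

    import logging
    import os

    logging.basicConfig(level=logging.WARNING)  # root: warnings and above only
    app_logger = logging.getLogger("app")
    app_logger.setLevel(os.getenv("APP_LOG_LEVEL", "INFO"))

    logging.getLogger("azure.core").info("suppressed")  # below root WARNING, dropped
    app_logger.info("visible")                          # app logger allows INFO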
tests.test_app_config/test_app_azure_custom_key
Modified
Azure-Samples~azure-search-openai-demo
1837d5f42ab4e9ba1d1c4785b8f00bb91e7b0a17
Rename Azure OpenAI key variable (#1880)
<2>:<add> monkeypatch.setenv("AZURE_OPENAI_API_KEY_OVERRIDE", "azure-api-key")
<del> monkeypatch.setenv("AZURE_OPENAI_API_KEY", "azure-api-key")
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_azure_custom_key(monkeypatch, minimal_env):
<0>     monkeypatch.setenv("OPENAI_HOST", "azure_custom")
<1>     monkeypatch.setenv("AZURE_OPENAI_CUSTOM_URL", "http://azureapi.com/api/v1")
<2>     monkeypatch.setenv("AZURE_OPENAI_API_KEY", "azure-api-key")
<3> 
<4>     quart_app = app.create_app()
<5>     async with quart_app.test_app():
<6>         assert quart_app.config[app.CONFIG_OPENAI_CLIENT].api_key == "azure-api-key"
<7>         assert quart_app.config[app.CONFIG_OPENAI_CLIENT].base_url == "http://azureapi.com/api/v1/openai/"
<8> 
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.monkeypatch
    monkeypatch() -> Generator["MonkeyPatch", None, None]
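The rename means client setup should consult AZURE_OPENAI_API_KEY_OVERRIDE first and otherwise fall back to keyless (Entra ID) auth. A hedged sketch of that resolution; the fallback branch is schematic, since the real app builds a token provider from azure-identity:

    import os

    def resolve_openai_auth() -> dict:
        # An explicitly provided key wins over keyless auth.
        if api_key := os.getenv("AZURE_OPENAI_API_KEY_OVERRIDE"):
            return {"api_key": api_key}
        # Placeholder: the app would supply an Entra ID token provider here.
        return {"azure_ad_token_provider": "<token provider built from azure-identity>"}

    print(resolve_openai_auth())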
app.backend.core.authentication/AuthenticationHelper.validate_access_token
Modified
Azure-Samples~azure-search-openai-demo
a8b1202045294052bf86bb7e523d25ef270c0d8c
Replace python-jose with pyjwt (#1875)
<20>:<add> raise AuthError("Unable to get keys to validate auth token.", 401)
<del> raise AuthError({"code": "invalid_keys", "description": "Unable to get keys to validate auth token."}, 401)
<26>:<del> unverified_header = jwt.get_unverified_header(token)
<27>:<del> unverified_claims = jwt.get_unverified_claims(token)
<28>:<add> unverified_claims = jwt.decode(token, options={"verify_signature": False})
<30>:<del> for key in jwks["keys"]:
<31>:<del> if key["kid"] == unverified_header["kid"]:
<32>:<del> rsa_key = {"kty": key["kty"], "kid": key["kid"], "use": key["use"], "n": key["n"], "e": key["e"]}
<33>:<del> break
<34>:<del> except Exception as exc:
<35>:<del> raise AuthError(
<36>:<del> {"code": "invalid_header", "description": "Unable to parse authorization token."}, 401
<37>:<del> ) from exc
# module: app.backend.core.authentication
class AuthenticationHelper:
    def validate_access_token(self, token: str):
<0>     """
<1>     Validate an access token is issued by Entra
<2>     """
<3>     jwks = None
<4>     async for attempt in AsyncRetrying(
<5>         retry=retry_if_exception_type(AuthError),
<6>         wait=wait_random_exponential(min=15, max=60),
<7>         stop=stop_after_attempt(5),
<8>     ):
<9>         with attempt:
<10>            async with aiohttp.ClientSession() as session:
<11>                async with session.get(url=self.key_url) as resp:
<12>                    resp_status = resp.status
<13>                    if resp_status in [500, 502, 503, 504]:
<14>                        raise AuthError(
<15>                            error=f"Failed to get keys info: {await resp.text()}", status_code=resp_status
<16>                        )
<17>                    jwks = await resp.json()
<18> 
<19>    if not jwks or "keys" not in jwks:
<20>        raise AuthError({"code": "invalid_keys", "description": "Unable to get keys to validate auth token."}, 401)
<21> 
<22>    rsa_key = None
<23>    issuer = None
<24>    audience = None
<25>    try:
<26>        unverified_header = jwt.get_unverified_header(token)
<27>        unverified_claims = jwt.get_unverified_claims(token)
<28>        issuer = unverified_claims.get("iss")
<29>        audience = unverified_claims.get("aud")
<30>        for key in jwks["keys"]:
<31>            if key["kid"] == unverified_header["kid"]:
<32>                rsa_key = {"kty": key["kty"], "kid": key["kid"], "use": key["use"], "n": key["n"], "e": key["e"]}
<33>                break
<34>    except Exception as exc:
<35>        raise AuthError(
<36>            {"code": "invalid_header", "description": "Unable to parse authorization token."}, 401
<37>        ) from exc</s>
===========below chunk 0===========
# module: app.backend.core.authentication
class AuthenticationHelper:
    def validate_access_token(self, token: str):
    # offset: 1
            raise AuthError({"code": "invalid_header", "description": "Unable to find appropriate key"}, 401)
        if issuer not in self.valid_issuers:
            raise AuthError(
                {"code": "invalid_header", "description": f"Issuer {issuer} not in {','.join(self.valid_issuers)}"}, 401
            )
        if audience not in self.valid_audiences:
            raise AuthError(
                {
                    "code": "invalid_header",
                    "description": f"Audience {audience} not in {','.join(self.valid_audiences)}",
                },
                401,
            )
        try:
            jwt.decode(token, rsa_key, algorithms=["RS256"], audience=audience, issuer=issuer)
        except ExpiredSignatureError as jwt_expired_exc:
            raise AuthError({"code": "token_expired", "description": "token is expired"}, 401) from jwt_expired_exc
        except JWTClaimsError as jwt_claims_exc:
            raise AuthError(
                {"code": "invalid_claims", "description": "incorrect claims," "please check the audience and issuer"},
                401,
            ) from jwt_claims_exc
        except Exception as exc:
            raise AuthError(
                {"code": "invalid_header", "description": "Unable to parse authorization token."}, 401
            ) from exc
===========changed ref 0===========
# module: tests.test_authenticationhelper
+ def create_mock_jwt(kid="mock_kid", oid="OID_X"):
+     # Create a payload with necessary claims
+     payload = {
+         "iss": "https://login.microsoftonline.com/TENANT_ID/v2.0",
+         "sub": "AaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaA",
+         "aud": "SERVER_APP",
+         "exp": int((datetime.utcnow() + timedelta(hours=1)).timestamp()),
+         "iat": int(datetime.utcnow().timestamp()),
+         "nbf": int(datetime.utcnow().timestamp()),
+         "name": "John Doe",
+         "oid": oid,
+         "preferred_username": "[email protected]",
+         "rh": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.",
+         "tid": "22222222-2222-2222-2222-222222222222",
+         "uti": "AbCdEfGhIjKlMnOp-ABCDEFG",
+         "ver": "2.0",
+     }
+
+     # Create a header
+     header = {"kid": kid, "alg": "RS256", "typ": "JWT"}
+
+     # Create a mock private key (for signing)
+     private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
+
+     # Create the JWT
+     token = jwt.encode(payload, private_key, algorithm="RS256", headers=header)
+
+     return token, private_key.public_key(), payload
+
===========changed ref 1===========
# module: tests.test_authenticationhelper
+ @pytest.mark.asyncio
+ async def test_validate_access_token(monkeypatch, mock_confidential_client_success):
+     mock_token, public_key, payload = create_mock_jwt(oid="OID_X")
+
+     def mock_get(*args, **kwargs):
+         return MockResponse(
+             status=200,
+             text=json.dumps(
+                 {
+                     "keys": [
+                         {
+                             "kty": "RSA",
+                             "use": "sig",
+                             "kid": "23nt",
+                             "x5t": "23nt",
+                             "n": "hu2SJ",
+                             "e": "AQAB",
+                             "x5c": ["MIIC/jCC"],
+                             "issuer": "https://login.microsoftonline.com/TENANT_ID/v2.0",
+                         },
+                         {
+                             "kty": "RSA",
+                             "use": "sig",
+                             "kid": "MGLq",
+                             "x5t": "MGLq",
+                             "n": "yfNcG8",
+                             "e": "AQAB",
+                             "x5c": ["MIIC/jCC"],
+                             "issuer": "https://login.microsoftonline.com/TENANT_ID/v2.0",
+                         },
+                     ]
+                 }
+             ),
+         )
+
+     monkeypatch.setattr(aiohttp.ClientSession, "get", mock_get)
+
+     def mock_decode(*args, **kwargs):
+         return payload
+
+     monkeypatch.setattr(jwt, "decode", mock_decode)
+
+     async def mock_create_pem_format(*args, **kwargs):
+         return public_key
+
+     monkeypatch.setattr(AuthenticationHelper, "create_pem_format", mock_create_pem_format)
+
+     helper =</s>
===========changed ref 2===========
# module: tests.test_authenticationhelper
+ @pytest.mark.asyncio
+ async def test_validate_access_token(monkeypatch, mock_confidential_client_success):
    # offset: 1
    <s>patch.setattr(AuthenticationHelper, "create_pem_format", mock_create_pem_format)
+
+     helper = create_authentication_helper()
+     await helper.validate_access_token(mock_token)
+
===========changed ref 3===========
# module: tests.test_authenticationhelper
+ @pytest.mark.asyncio
+ async def test_create_pem_format(mock_confidential_client_success, mock_validate_token_success):
+     helper = create_authentication_helper()
+     mock_token, public_key, payload = create_mock_jwt(oid="OID_X")
+     _, other_public_key, _ = create_mock_jwt(oid="OID_Y")
+     mock_jwks = {
+         "keys": [
+             # Include a key with a different KID to ensure the correct key is selected
+             {
+                 "kty": "RSA",
+                 "kid": "other_mock_kid",
+                 "use": "sig",
+                 "n": base64.urlsafe_b64encode(
+                     other_public_key.public_numbers().n.to_bytes(
+                         (other_public_key.public_numbers().n.bit_length() + 7) // 8, byteorder="big"
+                     )
+                 )
+                 .decode("utf-8")
+                 .rstrip("="),
+                 "e": base64.urlsafe_b64encode(
+                     other_public_key.public_numbers().e.to_bytes(
+                         (other_public_key.public_numbers().e.bit_length() + 7) // 8, byteorder="big"
+                     )
+                 )
+                 .decode("utf-8")
+                 .rstrip("="),
+             },
+             {
+                 "kty": "RSA",
+                 "kid": "mock_kid",
+                 "use": "sig",
+                 "n": base64.urlsafe_b64encode(
+                     public_key.public_numbers().n.to_bytes(
+                         (public_key.public_numbers().n.bit_length() + 7) // 8, byteorder="big"
+                     )
+                 )
+                 .decode("utf-8")
+                 .rstrip("="),
+                 "e": base64.urlsafe_</s>
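The python-jose to PyJWT migration shows up in the two decode calls: unverified claims now come from jwt.decode with signature verification disabled (PyJWT has no get_unverified_claims), and the final check is a single verified decode. A compact sketch using only documented PyJWT calls; key material and the app's JWKS-to-key conversion are elided:

    import jwt  # PyJWT, the replacement for python-jose

    def check_token(token: str, signing_key, valid_issuers: list[str], valid_audiences: list[str]) -> dict:
        header = jwt.get_unverified_header(token)
        if "kid" not in header:
            raise ValueError("token header has no key id")
        # Equivalent of jose's get_unverified_claims: decode without verifying.
        claims = jwt.decode(token, options={"verify_signature": False})
        if claims.get("iss") not in valid_issuers or claims.get("aud") not in valid_audiences:
            raise ValueError("untrusted issuer or audience")
        # Full verification; raises jwt.ExpiredSignatureError / jwt.InvalidTokenError on failure.
        return jwt.decode(token, signing_key, algorithms=["RS256"], audience=claims["aud"], issuer=claims["iss"])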
app.backend.approaches.chatapproach/ChatApproach.extract_followup_questions
Modified
Azure-Samples~azure-search-openai-demo
55b0961b98bf22b44fc6fd45c9ef0fc583c14f6e
Upgrade openai, openai-messages-token-helper, for gpt-4o-mini support (#1893)
<0>:<add> if content is None:
<add>     return content, []
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
+    def extract_followup_questions(self, content: Optional[str]):
-    def extract_followup_questions(self, content: str):
<0>     return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
<1> 
===========unchanged ref 0===========
at: app.backend.approaches.chatapproach.ChatApproach
    query_prompt_few_shots: list[ChatCompletionMessageParam] = [
        {"role": "user", "content": "How did crypto do last year?"},
        {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"},
        {"role": "user", "content": "What are my health plans?"},
        {"role": "assistant", "content": "Show available health plans"},
    ]
    NO_RESPONSE = "0"
    follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next.
Enclose the follow-up questions in double angle brackets. Example:
<<Are there exclusions for prescriptions?>>
<<Which pharmacies can be ordered from?>>
<<What is the limit for over-the-counter medication?>>
Do no repeat questions that have already been asked.
Make sure the last question ends with ">>".
"""
    query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.
You have access to Azure AI Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
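The None guard is the whole fix here: a model response can legally carry content=None, and calling str.split on it would raise. The extraction itself is just a split plus a findall, shown end to end:

    import re
    from typing import Optional

    def extract_followup_questions(content: Optional[str]):
        if content is None:  # content can be None on some model responses
            return content, []
        return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)

    print(extract_followup_questions("Paris. <<What about Spain?>> <<And Italy?>>"))
    # ('Paris. ', ['What about Spain?', 'And Italy?'])
    print(extract_followup_questions(None))  # (None, [])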
app.backend.approaches.chatapproach/ChatApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
55b0961b98bf22b44fc6fd45c9ef0fc583c14f6e
Upgrade openai, openai-messages-token-helper, for gpt-4o-mini support (#1893)
<4>:<del> chat_resp = chat_completion_response.model_dump()  # Convert to dict to make it JSON serializable
<5>:<del> chat_resp = chat_resp["choices"][0]
<6>:<del> chat_resp["context"] = extra_info
<7>:<add> content = chat_completion_response.choices[0].message.content
<add> role = chat_completion_response.choices[0].message.role
<8>:<add> content, followup_questions = self.extract_followup_questions(content)
<del> content, followup_questions = self.extract_followup_questions(chat_resp["message"]["content"])
<9>:<del> chat_resp["message"]["content"] = content
<10>:<add> extra_info["followup_questions"] = followup_questions
<del> chat_resp["context"]["followup_questions"] = followup_questions
<11>:<add> chat_app_response = {
<add>     "message": {"content": content, "role": role},
<add>     "context": extra_info,
<add>     "session_state": session_state,
<del> chat_resp["session_state"] = session_state
<12>:<add> }
<add> return chat_app_response
<del> return chat_resp
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
    def run_without_streaming(
        self,
        messages: list[ChatCompletionMessageParam],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> dict[str, Any]:
<0>     extra_info, chat_coroutine = await self.run_until_final_call(
<1>         messages, overrides, auth_claims, should_stream=False
<2>     )
<3>     chat_completion_response: ChatCompletion = await chat_coroutine
<4>     chat_resp = chat_completion_response.model_dump()  # Convert to dict to make it JSON serializable
<5>     chat_resp = chat_resp["choices"][0]
<6>     chat_resp["context"] = extra_info
<7>     if overrides.get("suggest_followup_questions"):
<8>         content, followup_questions = self.extract_followup_questions(chat_resp["message"]["content"])
<9>         chat_resp["message"]["content"] = content
<10>        chat_resp["context"]["followup_questions"] = followup_questions
<11>    chat_resp["session_state"] = session_state
<12>    return chat_resp
<13> 
===========unchanged ref 0===========
at: app.backend.approaches.chatapproach.ChatApproach
    run_until_final_call(messages, overrides, auth_claims, should_stream) -> tuple
    extract_followup_questions(content: Optional[str])
    extract_followup_questions(self, content: Optional[str])

at: re
    findall(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> List[Any]
    findall(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> List[Any]

at: typing.Mapping
    get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
+    def extract_followup_questions(self, content: Optional[str]):
-    def extract_followup_questions(self, content: str):
+        if content is None:
+            return content, []
         return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
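The non-streaming rewrite swaps dict access after model_dump for typed attribute access on the ChatCompletion object, then builds a plain response envelope. A schematic of the new shape; resp stands in for an openai ChatCompletion object, so this is an illustrative sketch rather than the repo's exact method:

    def to_chat_app_response(resp, extra_info: dict, session_state):
        message = resp.choices[0].message  # typed access instead of model_dump()["choices"][0]
        return {
            "message": {"content": message.content, "role": message.role},
            "context": extra_info,
            "session_state": session_state,
        }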
app.backend.approaches.chatapproach/ChatApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
55b0961b98bf22b44fc6fd45c9ef0fc583c14f6e
Upgrade openai, openai-messages-token-helper, for gpt-4o-mini support (#1893)
<11>:<add> completion = {
<add>     "delta": {
<add>         "content": event["choices"][0]["delta"].get("content"),
<add>         "role": event["choices"][0]["delta"]["role"],
<add>     }
<add> }
<del> completion = {"delta": event["choices"][0]["delta"]}
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
    def run_with_streaming(
        self,
        messages: list[ChatCompletionMessageParam],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> AsyncGenerator[dict, None]:
<0>     extra_info, chat_coroutine = await self.run_until_final_call(
<1>         messages, overrides, auth_claims, should_stream=True
<2>     )
<3>     yield {"delta": {"role": "assistant"}, "context": extra_info, "session_state": session_state}
<4> 
<5>     followup_questions_started = False
<6>     followup_content = ""
<7>     async for event_chunk in await chat_coroutine:
<8>         # "2023-07-01-preview" API version has a bug where first response has empty choices
<9>         event = event_chunk.model_dump()  # Convert pydantic model to dict
<10>        if event["choices"]:
<11>            completion = {"delta": event["choices"][0]["delta"]}
<12>            # if event contains << and not >>, it is start of follow-up question, truncate
<13>            content = completion["delta"].get("content")
<14>            content = content or ""  # content may either not exist in delta, or explicitly be None
<15>            if overrides.get("suggest_followup_questions") and "<<" in content:
<16>                followup_questions_started = True
<17>                earlier_content = content[: content.index("<<")]
<18>                if earlier_content:
<19>                    completion["delta"]["content"] = earlier_content
<20>                    yield completion
<21>                followup_content += content[content.index("<<") :]
<22>            elif followup_questions_started:
<23>                followup_content += content
<24>            else:
<25>                yield completion
<26>    if followup_content:
<27>        _, followup_questions = self.extract_followup_questions(followup_content)
<28>        yield {"delta": {"role": "assistant"</s>
===========below chunk 0===========
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
    def run_with_streaming(
        self,
        messages: list[ChatCompletionMessageParam],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> AsyncGenerator[dict, None]:
    # offset: 1
===========unchanged ref 0===========
at: app.backend.approaches.chatapproach.ChatApproach
    run_until_final_call(messages, overrides, auth_claims, should_stream) -> tuple

at: app.backend.approaches.chatapproach.ChatApproach.run_without_streaming
    chat_app_response = {
        "message": {"content": content, "role": role},
        "context": extra_info,
        "session_state": session_state,
    }

at: typing
    AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)

at: typing.Mapping
    get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
    get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
+    def extract_followup_questions(self, content: Optional[str]):
-    def extract_followup_questions(self, content: str):
+        if content is None:
+            return content, []
         return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
===========changed ref 1===========
# module: app.backend.approaches.chatapproach
class ChatApproach(Approach, ABC):
    def run_without_streaming(
        self,
        messages: list[ChatCompletionMessageParam],
        overrides: dict[str, Any],
        auth_claims: dict[str, Any],
        session_state: Any = None,
    ) -> dict[str, Any]:
        extra_info, chat_coroutine = await self.run_until_final_call(
            messages, overrides, auth_claims, should_stream=False
        )
        chat_completion_response: ChatCompletion = await chat_coroutine
-       chat_resp = chat_completion_response.model_dump()  # Convert to dict to make it JSON serializable
-       chat_resp = chat_resp["choices"][0]
-       chat_resp["context"] = extra_info
+       content = chat_completion_response.choices[0].message.content
+       role = chat_completion_response.choices[0].message.role
        if overrides.get("suggest_followup_questions"):
+           content, followup_questions = self.extract_followup_questions(content)
-           content, followup_questions = self.extract_followup_questions(chat_resp["message"]["content"])
-           chat_resp["message"]["content"] = content
+           extra_info["followup_questions"] = followup_questions
-           chat_resp["context"]["followup_questions"] = followup_questions
+       chat_app_response = {
+           "message": {"content": content, "role": role},
+           "context": extra_info,
+           "session_state": session_state,
-       chat_resp["session_state"] = session_state
+       }
+       return chat_app_response
-       return chat_resp
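The streaming path has the subtler job: once "<<" appears in a delta, everything after it is buffered as follow-up-question text instead of being yielded. The buffering logic in isolation, over plain strings:

    def split_stream(deltas: list[str]) -> tuple[list[str], str]:
        emitted, followup, started = [], "", False
        for content in deltas:
            if not started and "<<" in content:
                started = True
                before = content[: content.index("<<")]
                if before:
                    emitted.append(before)  # flush any text preceding the marker
                followup += content[content.index("<<") :]
            elif started:
                followup += content        # keep accumulating follow-up text
            else:
                emitted.append(content)
        return emitted, followup

    print(split_stream(["Paris is the capital. ", "<<What about", " Spain?>>"]))
    # (['Paris is the capital. '], '<<What about Spain?>>')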
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run
Modified
Azure-Samples~azure-search-openai-demo
55b0961b98bf22b44fc6fd45c9ef0fc583c14f6e
Upgrade openai, openai-messages-token-helper, for gpt-4o-mini support (#1893)
# module: app.backend.approaches.retrievethenreadvision
class RetrieveThenReadVisionApproach(Approach):
    def run(
        self,
        messages: list[ChatCompletionMessageParam],
        session_state: Any = None,
        context: dict[str, Any] = {},
    ) -> dict[str, Any]:
<0>     q = messages[-1]["content"]
<1>     if not isinstance(q, str):
<2>         raise ValueError("The most recent message content must be a string.")
<3> 
<4>     overrides = context.get("overrides", {})
<5>     seed = overrides.get("seed", None)
<6>     auth_claims = context.get("auth_claims", {})
<7>     use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None]
<8>     use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
<9>     use_semantic_ranker = True if overrides.get("semantic_ranker") else False
<10>    use_semantic_captions = True if overrides.get("semantic_captions") else False
<11>    top = overrides.get("top", 3)
<12>    minimum_search_score = overrides.get("minimum_search_score", 0.0)
<13>    minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
<14>    filter = self.build_filter(overrides, auth_claims)
<15> 
<16>    vector_fields = overrides.get("vector_fields", ["embedding"])
<17>    send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None]
<18>    send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None]
<19> 
<20>    # If retrieval mode includes vectors, compute an embedding for the query
<21>    vectors = []
<22>    if use_vector_search:
<23>        for field in vector_fields:
<24>            vector = (
<25>                await self.compute_text_embedding(q)
<26> </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v), new_user_content=user_content, max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion =</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s> max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion = ( await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) ).model_dump() data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( </s> ===========below chunk 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 3 <s>model": self.gpt4v_model, "deployment": self.gpt4v_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } completion = {} completion["message"] = chat_completion["choices"][0]["message"] completion["context"] = extra_info completion["session_state"] = session_state return completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach system_chat_template_gpt4v = ( "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables 
and images. " + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned " + "If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts " ) at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__ self.blob_container_client = blob_container_client self.openai_client = openai_client self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.gpt4v_token_limit = get_token_limit(gpt4v_model) at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
55b0961b98bf22b44fc6fd45c9ef0fc583c14f6e
Upgrade openai, openai-messages-token-helper, for gpt-4o-mini support (#1893)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> overrides = context.get("overrides", {}) <4> seed = overrides.get("seed", None) <5> auth_claims = context.get("auth_claims", {}) <6> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <7> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <8> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <9> use_semantic_captions = True if overrides.get("semantic_captions") else False <10> top = overrides.get("top", 3) <11> minimum_search_score = overrides.get("minimum_search_score", 0.0) <12> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <13> filter = self.build_filter(overrides, auth_claims) <14> <15> # If retrieval mode includes vectors, compute an embedding for the query <16> vectors: list[VectorQuery] = [] <17> if use_vector_search: <18> vectors.append(await self.compute_text_embedding(q)) <19> <20> results = await self.search( <21> top, <22> q, <23> filter, <24> vectors, <25> use_text_search, <26> use_vector_search, <27> use_semantic_ranker, <28> use_semantic_captions, <29> minimum_search_score, <30> minimum_reranker_score, <31> ) <32> <33> # Process</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False) # Append user message content = "\n".join(sources_content) user_content = q + "\n" + f"Sources:\n {content}" response_token_limit = 1024 updated_messages = build_messages( model=self.chatgpt_model, system_prompt=overrides.get("prompt_template", self.system_chat_template), few_shots=[{"role": "user", "content": self.question}, {"role": "assistant", "content": self.answer}], new_user_content=user_content, max_tokens=self.chatgpt_token_limit - response_token_limit, ) chat_completion = ( await self.openai_client.chat.completions.create( # Azure OpenAI takes the deployment name as the model name model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) ).model_dump() data_points = {"text": sources_content} extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s>_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment} if self.chatgpt_deployment else {"model": self.chatgpt_model} ), ), ], } completion = {} completion["message"] = chat_completion["choices"][0]["message"] completion["context"] = extra_info completion["session_state"] = session_state return completion ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. 
info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.chatgpt_token_limit = get_token_limit(chatgpt_model) at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_text_search: bool, use_vector_search: bool, use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document] get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str] compute_text_embedding(q: str) run(self, messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any] at: approaches.approach.Document id: Optional[str] content: Optional[str] embedding: Optional[List[float]] image_embedding: Optional[List[float]] category: Optional[str] sourcepage: Optional[str] sourcefile: Optional[str] oids: Optional[List[str]] groups: Optional[List[str]] captions: List[QueryCaptionResult] score: Optional[float] = None reranker_score: Optional[float] = None serialize_for_results() -> dict[str, Any] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatapproach class ChatApproach(Approach, ABC): + def extract_followup_questions(self, content: Optional[str]): - def extract_followup_questions(self, content: str): + if content is None: + return content, [] return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
app.backend.approaches.chatreadretrievereadvision/ChatReadRetrieveReadVisionApproach.system_message_chat_conversation
Modified
Azure-Samples~azure-search-openai-demo
781bf21dfa8546b11987ce41345f6a4d6a2a078a
Add Markdown Render Support to GPT completions (#56)
<8>:<del> For tabular information return it as an html table. Do not return markdown format.
# module: app.backend.approaches.chatreadretrievereadvision class ChatReadRetrieveReadVisionApproach(ChatApproach): @property def system_message_chat_conversation(self): <0> return """ <1> You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. <2> Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> <3> Each text source starts in a new line and has the file name followed by colon and the actual information <4> Always include the source name from the image or text for each fact you use in the response in the format: [filename] <5> Answer the following question using only the data provided in the sources below. <6> If asking a clarifying question to the user would help, ask the question. <7> Be brief in your answers. <8> For tabular information return it as an html table. Do not return markdown format. <9> The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned <10> If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts. <11> {follow_up_questions_prompt} <12> {injected_prompt} <13> """ <14>
===========unchanged ref 0=========== at: approaches.chatapproach.ChatApproach query_prompt_few_shots: list[ChatCompletionMessageParam] = [ {"role": "user", "content": "How did crypto do last year?"}, {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"}, {"role": "user", "content": "What are my health plans?"}, {"role": "assistant", "content": "Show available health plans"}, ] NO_RESPONSE = "0" follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. Enclose the follow-up questions in double angle brackets. Example: <<Are there exclusions for prescriptions?>> <<Which pharmacies can be ordered from?>> <<What is the limit for over-the-counter medication?>> Do no repeat questions that have already been asked. Make sure the last question ends with ">>". """ query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. You have access to Azure AI Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """
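The {follow_up_questions_prompt} and {injected_prompt} placeholders in the template above are filled at request time. A minimal sketch of that substitution, assuming plain str.format; the repo's actual helper may add more handling (e.g. override prefixes):

template = (
    "You are an intelligent assistant helping analyze the Annual Financial Report.\n"
    "{follow_up_questions_prompt}\n"
    "{injected_prompt}"
)

def render_system_prompt(suggest_followups: bool, injected: str = "") -> str:
    followup = "Generate 3 very brief follow-up questions." if suggest_followups else ""
    return template.format(
        follow_up_questions_prompt=followup,
        injected_prompt=injected,
    ).strip()

assert "follow-up" in render_system_prompt(suggest_followups=True)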
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.system_message_chat_conversation
Modified
Azure-Samples~azure-search-openai-demo
781bf21dfa8546b11987ce41345f6a4d6a2a078a
Add Markdown Render Support to GPT completions (#56)
<2>:<add> If the question is not in English, answer in the language used in the question. <del> For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(ChatApproach): @property def system_message_chat_conversation(self): <0> return """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. <1> Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. <2> For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. <3> Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]. <4> {follow_up_questions_prompt} <5> {injected_prompt} <6> """ <7>
===========unchanged ref 0=========== at: approaches.chatapproach.ChatApproach query_prompt_few_shots: list[ChatCompletionMessageParam] = [ {"role": "user", "content": "How did crypto do last year?"}, {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"}, {"role": "user", "content": "What are my health plans?"}, {"role": "assistant", "content": "Show available health plans"}, ] NO_RESPONSE = "0" follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next. Enclose the follow-up questions in double angle brackets. Example: <<Are there exclusions for prescriptions?>> <<Which pharmacies can be ordered from?>> <<What is the limit for over-the-counter medication?>> Do no repeat questions that have already been asked. Make sure the last question ends with ">>". """ query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. You have access to Azure AI Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrievereadvision class ChatReadRetrieveReadVisionApproach(ChatApproach): @property def system_message_chat_conversation(self): return """ You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> Each text source starts in a new line and has the file name followed by colon and the actual information Always include the source name from the image or text for each fact you use in the response in the format: [filename] Answer the following question using only the data provided in the sources below. If asking a clarifying question to the user would help, ask the question. Be brief in your answers. - For tabular information return it as an html table. Do not return markdown format. The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts. {follow_up_questions_prompt} {injected_prompt} """
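The prompt above mandates square-bracket citations such as [info1.txt][info2.pdf]. A small sketch of pulling those references back out of a completion; the extract_citations helper is hypothetical, only the citation format comes from the prompt:

import re

def extract_citations(answer: str) -> list[str]:
    # Pulls out every [source] reference in the format the prompt mandates
    return re.findall(r"\[([^\]]+)\]", answer)

assert extract_citations(
    "In-network deductibles are $500 for employee [info1.txt][info2.pdf]."
) == ["info1.txt", "info2.pdf"]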
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run
Modified
Azure-Samples~azure-search-openai-demo
eb5627c68e40edc894d1ec69d182ab855bd0fb5d
Update test snapshots (#1948)
# module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> <4> overrides = context.get("overrides", {}) <5> seed = overrides.get("seed", None) <6> auth_claims = context.get("auth_claims", {}) <7> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <8> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <9> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <10> use_semantic_captions = True if overrides.get("semantic_captions") else False <11> top = overrides.get("top", 3) <12> minimum_search_score = overrides.get("minimum_search_score", 0.0) <13> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <14> filter = self.build_filter(overrides, auth_claims) <15> <16> vector_fields = overrides.get("vector_fields", ["embedding"]) <17> send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] <18> send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] <19> <20> # If retrieval mode includes vectors, compute an embedding for the query <21> vectors = [] <22> if use_vector_search: <23> for field in vector_fields: <24> vector = ( <25> await self.compute_text_embedding(q) <26> </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v), new_user_content=user_content, max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion =</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s> max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion = await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.gpt4v_</s> ===========below chunk 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 3 <s> "deployment": self.gpt4v_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } return { "message": { "content": chat_completion.choices[0].message.content, "role": chat_completion.choices[0].message.role, }, "context": extra_info, "session_state": session_state, } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach system_chat_template_gpt4v = ( "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. 
" + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " + "Answer the following question using only the data provided in the sources below. " + "The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned " + "If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts " ) at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__ self.blob_container_client = blob_container_client self.openai_client = openai_client self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.gpt4v_token_limit = get_token_limit(gpt4v_model) at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]
tests.test_app/test_chat_with_history
Modified
Azure-Samples~azure-search-openai-demo
eb5627c68e40edc894d1ec69d182ab855bd0fb5d
Update test snapshots (#1948)
<18>:<add> assert messages_contains_text(result["context"]["thoughts"][3]["description"], "performance review") <del> assert thought_contains_text(result["context"]["thoughts"][3], "performance review")
# module: tests.test_app @pytest.mark.asyncio async def test_chat_with_history(client, snapshot): <0> response = await client.post( <1> "/chat", <2> json={ <3> "messages": [ <4> {"content": "What happens in a performance review?", "role": "user"}, <5> { <6> "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <7> "role": "assistant", <8> }, <9> {"content": "Is dental covered?", "role": "user"}, <10> ], <11> "context": { <12> "overrides": {"retrieval_mode": "text"}, <13> }, <14> }, <15> ) <16> assert response.status_code == 200 <17> result = await response.get_json() <18> assert thought_contains_text(result["context"]["thoughts"][3], "performance review") <19> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <20>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app messages_contains_text(messages, text) ===========changed ref 0=========== # module: tests.test_app + def messages_contains_text(messages, text): + for message in messages: + if text in message["content"]: + return True + return False + ===========changed ref 1=========== # module: tests.test_app - def thought_contains_text(thought, text): - description = thought["description"] - if isinstance(description, str) and text in description: - return True - elif isinstance(description, list) and any(text in item for item in description): - return True - return False - ===========changed ref 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: q = messages[-1]["content"] if not isinstance(q, str): raise ValueError("The most recent message content must be a string.") overrides = context.get("overrides", {}) seed = overrides.get("seed", None) auth_claims = context.get("auth_claims", {}) use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = self.build_filter(overrides, auth_claims) vector_fields = overrides.get("vector_fields", ["embedding"]) send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] # If retrieval mode includes vectors, compute an embedding for the query vectors = [] if use_vector_search: for field in vector_fields: vector = ( await self.compute_text_embedding(q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors</s> ===========changed ref 3=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 <s>q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await 
fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v),</s> ===========changed ref 4=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s> new_user_content=user_content, max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion = await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", + updated_messages, - [str(message) for message</s> ===========changed ref 5=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 3 <s>_messages], ( {"model": self.gpt4v_model, "deployment": self.gpt4v_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } return { "message": { "content": chat_completion.choices[0].message.content, "role": chat_completion.choices[0].message.role, }, "context": extra_info, "session_state": session_state, }
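The new messages_contains_text helper scans a list of message dicts, matching the updated thought-step description format asserted in the test above. A quick standalone check with a hypothetical description:

def messages_contains_text(messages, text):
    for message in messages:
        if text in message["content"]:
            return True
    return False

# Illustrative thought-step description: a list of message dicts, as the
# updated snapshot format stores it
description = [
    {"role": "system", "content": "Assistant helps with healthcare questions."},
    {"role": "user", "content": "What happens in a performance review?"},
]
assert messages_contains_text(description, "performance review")
assert not messages_contains_text(description, "dress code")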
tests.test_app/test_chat_with_long_history
Modified
Azure-Samples~azure-search-openai-demo
eb5627c68e40edc894d1ec69d182ab855bd0fb5d
Update test snapshots (#1948)
<22>:<add> assert not messages_contains_text(result["context"]["thoughts"][3]["description"], "Is there a dress code?") <del> assert not thought_contains_text(result["context"]["thoughts"][3], "Is there a dress code?")
# module: tests.test_app @pytest.mark.asyncio async def test_chat_with_long_history(client, snapshot, caplog): <0> """This test makes sure that the history is truncated to max tokens minus 1024.""" <1> caplog.set_level(logging.DEBUG) <2> response = await client.post( <3> "/chat", <4> json={ <5> "messages": [ <6> {"role": "user", "content": "Is there a dress code?"}, # 9 tokens <7> { <8> "role": "assistant", <9> "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]" <10> * 150, <11> }, # 3900 tokens <12> {"role": "user", "content": "What does a product manager do?"}, # 10 tokens <13> ], <14> "context": { <15> "overrides": {"retrieval_mode": "text"}, <16> }, <17> }, <18> ) <19> assert response.status_code == 200 <20> result = await response.get_json() <21> # Assert that it doesn't find the first message, since it wouldn't fit in the max tokens. <22> assert not thought_contains_text(result["context"]["thoughts"][3], "Is there a dress code?") <23> assert "Reached max tokens" in caplog.text <24> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <25>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging DEBUG = 10 at: tests.test_app messages_contains_text(messages, text) ===========changed ref 0=========== # module: tests.test_app + def messages_contains_text(messages, text): + for message in messages: + if text in message["content"]: + return True + return False + ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_with_history(client, snapshot): response = await client.post( "/chat", json={ "messages": [ {"content": "What happens in a performance review?", "role": "user"}, { "content": "During a performance review, employees will receive feedback on their performance over the past year, including both successes and areas for improvement. The feedback will be provided by the employee's supervisor and is intended to help the employee develop and grow in their role [employee_handbook-3.pdf]. The review is a two-way dialogue between the employee and their manager, so employees are encouraged to be honest and open during the process [employee_handbook-3.pdf]. The employee will also have the opportunity to discuss their goals and objectives for the upcoming year [employee_handbook-3.pdf]. A written summary of the performance review will be provided to the employee, which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", "role": "assistant", }, {"content": "Is dental covered?", "role": "user"}, ], "context": { "overrides": {"retrieval_mode": "text"}, }, }, ) assert response.status_code == 200 result = await response.get_json() + assert messages_contains_text(result["context"]["thoughts"][3]["description"], "performance review") - assert thought_contains_text(result["context"]["thoughts"][3], "performance review") snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app - def thought_contains_text(thought, text): - description = thought["description"] - if isinstance(description, str) and text in description: - return True - elif isinstance(description, list) and any(text in item for item in description): - return True - return False - ===========changed ref 3=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: q = messages[-1]["content"] if not isinstance(q, str): raise ValueError("The most recent message content must be a string.") overrides = context.get("overrides", {}) seed = overrides.get("seed", None) auth_claims = context.get("auth_claims", {}) use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = 
self.build_filter(overrides, auth_claims) vector_fields = overrides.get("vector_fields", ["embedding"]) send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] # If retrieval mode includes vectors, compute an embedding for the query vectors = [] if use_vector_search: for field in vector_fields: vector = ( await self.compute_text_embedding(q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors</s> ===========changed ref 4=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 <s>q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v),</s>
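The long-history test above checks that older turns are dropped once the token budget is spent. The real logic lives in openai-messages-token-helper; this toy version counts whitespace-separated words as "tokens" purely to illustrate the newest-first truncation:

def truncate_history(messages: list[dict], max_tokens: int) -> list[dict]:
    kept: list[dict] = []
    used = 0
    for message in reversed(messages):
        cost = len(message["content"].split())  # toy token count
        if used + cost > max_tokens:
            break  # the real helper logs "Reached max tokens" here
        kept.insert(0, message)
        used += cost
    return kept

history = [
    {"role": "user", "content": "Is there a dress code?"},
    {"role": "assistant", "content": "Yes. " * 500},
    {"role": "user", "content": "What does a product manager do?"},
]
truncated = truncate_history(history, max_tokens=50)
assert history[0] not in truncated  # oldest message no longer fits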
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
eb5627c68e40edc894d1ec69d182ab855bd0fb5d
Update test snapshots (#1948)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> overrides = context.get("overrides", {}) <4> seed = overrides.get("seed", None) <5> auth_claims = context.get("auth_claims", {}) <6> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <7> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <8> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <9> use_semantic_captions = True if overrides.get("semantic_captions") else False <10> top = overrides.get("top", 3) <11> minimum_search_score = overrides.get("minimum_search_score", 0.0) <12> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <13> filter = self.build_filter(overrides, auth_claims) <14> <15> # If retrieval mode includes vectors, compute an embedding for the query <16> vectors: list[VectorQuery] = [] <17> if use_vector_search: <18> vectors.append(await self.compute_text_embedding(q)) <19> <20> results = await self.search( <21> top, <22> q, <23> filter, <24> vectors, <25> use_text_search, <26> use_vector_search, <27> use_semantic_ranker, <28> use_semantic_captions, <29> minimum_search_score, <30> minimum_reranker_score, <31> ) <32> <33> # Process</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False) # Append user message content = "\n".join(sources_content) user_content = q + "\n" + f"Sources:\n {content}" response_token_limit = 1024 updated_messages = build_messages( model=self.chatgpt_model, system_prompt=overrides.get("prompt_template", self.system_chat_template), few_shots=[{"role": "user", "content": self.question}, {"role": "assistant", "content": self.answer}], new_user_content=user_content, max_tokens=self.chatgpt_token_limit - response_token_limit, ) chat_completion = await self.openai_client.chat.completions.create( # Azure OpenAI takes the deployment name as the model name model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = {"text": sources_content} extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s>_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", [str(message) for message in updated_messages], ( {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment} if self.chatgpt_deployment else {"model": self.chatgpt_model} ), ), ], } return { "message": { "content": chat_completion.choices[0].message.content, "role": chat_completion.choices[0].message.role, }, "context": extra_info, "session_state": session_state, } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.chatgpt_model = chatgpt_model self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========unchanged ref 1=========== at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_text_search: bool, use_vector_search: bool, use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document] get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str] compute_text_embedding(q: str) run(self, messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_app + def messages_contains_text(messages, text): + for message in messages: + if text in message["content"]: + return True + return False + ===========changed ref 1=========== # module: tests.test_app - def thought_contains_text(thought, text): - description = thought["description"] - if isinstance(description, str) and text in description: - return True - elif isinstance(description, list) and any(text in item for item in description): - return True - return False -
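build_messages (from openai-messages-token-helper) lays out the ask-tab request as system prompt, one few-shot Q/A pair, then the new user content. A hand-rolled sketch of that ordering, ignoring the library's token trimming:

def assemble_messages(system_prompt: str, shot_q: str, shot_a: str, user_content: str) -> list[dict]:
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": shot_q},        # few-shot question
        {"role": "assistant", "content": shot_a},   # few-shot answer
        {"role": "user", "content": user_content},  # question + "Sources:" block
    ]

messages = assemble_messages(
    "You are an intelligent assistant.",
    "What is the deductible?",
    "In-network deductibles are $500 [info1.txt].",
    "Is dental covered?\nSources:\n info1.txt: ...",
)
assert [m["role"] for m in messages] == ["system", "user", "assistant", "user"]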
tests.conftest/mock_env
Modified
Azure-Samples~azure-search-openai-demo
9073b65bc86dcffaba5a6c8d332b14e0102912b7
Frontend multi-language support #1690 (#1790)
<5>:<add> monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true")
# module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): <0> with mock.patch.dict(os.environ, clear=True): <1> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <2> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <3> monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg") <4> monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid") <5> monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true") <6> monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true") <7> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <8> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <9> monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id") <10> monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus") <11> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <12> monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com") <13> for key, value in request.param.items(): <14> monkeypatch.setenv(key, value) <15> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <16> monkeypatch.delenv("AZURE_USE_AUTHENTICATION") <17> <18> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <19> mock_default_azure_credential.return_value = MockAzureCredential() <20> yield <21>
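The fixture above clears the real environment before layering base values plus per-parameter overrides, so client0 and client1 never leak state into each other. A trimmed sketch of the same pattern with illustrative params:

import os
from unittest import mock
import pytest

envs = [{"OPENAI_HOST": "azure"}, {"OPENAI_HOST": "openai"}]  # illustrative params

@pytest.fixture(params=envs, ids=["client0", "client1"])
def isolated_env(monkeypatch, request):
    # mock.patch.dict(clear=True) wipes os.environ for the test's duration;
    # monkeypatch.setenv then restores each override automatically afterwards
    with mock.patch.dict(os.environ, clear=True):
        monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true")
        for key, value in request.param.items():
            monkeypatch.setenv(key, value)
        yield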
app.backend.app/config
Modified
Azure-Samples~azure-search-openai-demo
9073b65bc86dcffaba5a6c8d332b14e0102912b7
Frontend multi-language support #1690 (#1790)
<6>:<add> "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED],
# module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): <0> return jsonify( <1> { <2> "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], <3> "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], <4> "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], <5> "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], <6> "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], <7> "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], <8> "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], <9> } <10> ) <11>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: config CONFIG_USER_UPLOAD_ENABLED = "user_upload_enabled" CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed" CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed" CONFIG_VECTOR_SEARCH_ENABLED = "vector_search_enabled" CONFIG_LANGUAGE_PICKER_ENABLED = "language_picker_enabled" CONFIG_SPEECH_INPUT_ENABLED = "speech_input_enabled" CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED = "speech_output_browser_enabled" CONFIG_SPEECH_OUTPUT_AZURE_ENABLED = "speech_output_azure_enabled" ===========changed ref 0=========== # module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): with mock.patch.dict(os.environ, clear=True): monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg") monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid") + monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true") monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true") monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true") monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id") monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus") monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com") for key, value in request.param.items(): monkeypatch.setenv(key, value) if os.getenv("AZURE_USE_AUTHENTICATION") is not None: monkeypatch.delenv("AZURE_USE_AUTHENTICATION") with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: mock_default_azure_credential.return_value = MockAzureCredential() yield
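After this commit, GET /config returns the showLanguagePicker flag alongside the existing feature toggles. An illustrative response shape; the boolean values below are made up for the example, only the keys come from the route:

expected_config = {
    "showGPT4VOptions": False,
    "showSemanticRankerOption": True,
    "showVectorOption": True,
    "showUserUpload": False,
    "showLanguagePicker": True,  # new in this commit
    "showSpeechInput": False,
    "showSpeechOutputBrowser": False,
    "showSpeechOutputAzure": False,
}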
tests.e2e/run_server
Modified
Azure-Samples~azure-search-openai-demo
9073b65bc86dcffaba5a6c8d332b14e0102912b7
Frontend multi-language support #1690 (#1790)
<7>:<add> "ENABLE_LANGUAGE_PICKER": "false",
# module: tests.e2e def run_server(port: int): <0> with mock.patch.dict( <1> os.environ, <2> { <3> "AZURE_STORAGE_ACCOUNT": "test-storage-account", <4> "AZURE_STORAGE_CONTAINER": "test-storage-container", <5> "AZURE_STORAGE_RESOURCE_GROUP": "test-storage-rg", <6> "AZURE_SUBSCRIPTION_ID": "test-storage-subid", <7> "USE_SPEECH_INPUT_BROWSER": "false", <8> "USE_SPEECH_OUTPUT_AZURE": "false", <9> "AZURE_SEARCH_INDEX": "test-search-index", <10> "AZURE_SEARCH_SERVICE": "test-search-service", <11> "AZURE_SPEECH_SERVICE_ID": "test-id", <12> "AZURE_SPEECH_SERVICE_LOCATION": "eastus", <13> "AZURE_OPENAI_SERVICE": "test-openai-service", <14> "AZURE_OPENAI_CHATGPT_MODEL": "gpt-35-turbo", <15> }, <16> clear=True, <17> ): <18> uvicorn.run(app.create_app(), port=port) <19>
===========changed ref 0=========== # module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): return jsonify( { "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], + "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED], "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], } ) ===========changed ref 1=========== # module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): with mock.patch.dict(os.environ, clear=True): monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg") monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid") + monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true") monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true") monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true") monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id") monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus") monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com") for key, value in request.param.items(): monkeypatch.setenv(key, value) if os.getenv("AZURE_USE_AUTHENTICATION") is not None: monkeypatch.delenv("AZURE_USE_AUTHENTICATION") with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: mock_default_azure_credential.return_value = MockAzureCredential() yield ===========changed ref 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # Replace these with your own values, either in environment variables or directly here AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] AZURE_USERSTORAGE_ACCOUNT = os.environ.get("AZURE_USERSTORAGE_ACCOUNT") AZURE_USERSTORAGE_CONTAINER = os.environ.get("AZURE_USERSTORAGE_CONTAINER") AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] # Shared by all OpenAI deployments OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") OPENAI_EMB_DIMENSIONS = int(os.getenv("AZURE_OPENAI_EMB_DIMENSIONS", 1536)) # Used with Azure OpenAI deployments AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") AZURE_OPENAI_GPT4V_DEPLOYMENT = os.environ.get("AZURE_OPENAI_GPT4V_DEPLOYMENT") AZURE_OPENAI_GPT4V_MODEL = os.environ.get("AZURE_OPENAI_GPT4V_MODEL") AZURE_OPENAI_CHATGPT_DEPLOYMENT = ( os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") if OPENAI_HOST.startswith("azure") else None ) AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") if OPENAI</s> ===========changed ref 3=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 
<s>AI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") if OPENAI_HOST.startswith("azure") else None AZURE_OPENAI_CUSTOM_URL = os.getenv("AZURE_OPENAI_CUSTOM_URL") AZURE_VISION_ENDPOINT = os.getenv("AZURE_VISION_ENDPOINT", "") # Used only with non-Azure OpenAI deployments OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION") AZURE_TENANT_ID = os.getenv("AZURE_TENANT_ID") AZURE_USE_AUTHENTICATION = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" AZURE_ENFORCE_ACCESS_CONTROL = os.getenv("AZURE_ENFORCE_ACCESS_CONTROL", "").lower() == "true" AZURE_ENABLE_GLOBAL_DOCUMENT_ACCESS = os.getenv("AZURE_ENABLE_GLOBAL_DOCUMENT_ACCESS", "").lower() == "true" AZURE_ENABLE_UNAUTHENTICATED_ACCESS = os.getenv("AZURE_ENABLE_UNAUTHENTICATED_ACCESS", "").lower() == "true" AZURE_SERVER_APP_ID = os.getenv("AZURE_SERVER_APP_ID") AZURE_SERVER_APP_SECRET = os.getenv("AZURE_SERVER_APP_SECRET") AZURE_CLIENT_APP_ID = os.getenv("AZURE_CLIENT_APP_ID") AZURE_AUTH_TENANT_ID = os.getenv("AZURE_AUTH_TENANT_ID", AZURE_TENANT_ID) KB_FIELDS_CONTENT = os.</s>
tests.conftest/mock_env
Modified
Azure-Samples~azure-search-openai-demo
2333426632d5f4d30fd733fdbf09b58197589d88
Better support for deploying to non-home tenant (#1964)
<19>:<add> with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential: <del> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
# module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): <0> with mock.patch.dict(os.environ, clear=True): <1> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <2> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <3> monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg") <4> monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid") <5> monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true") <6> monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true") <7> monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true") <8> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <9> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <10> monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id") <11> monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus") <12> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <13> monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com") <14> for key, value in request.param.items(): <15> monkeypatch.setenv(key, value) <16> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <17> monkeypatch.delenv("AZURE_USE_AUTHENTICATION") <18> <19> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <20> mock_default_azure_credential.return_value = MockAzureCredential() <21> yield <22>
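The key detail in this commit's fixture change is patching the name the app module imports ("app.AzureDeveloperCliCredential"), not azure.identity itself. A sketch of that pattern; it assumes the backend's app module is importable, as it is inside the test suite:

from unittest import mock

class MockAzureCredential:
    async def get_token(self, *scopes, **kwargs):
        # Minimal stand-in for azure.core.credentials.AccessToken
        return mock.Mock(token="mock-token", expires_on=9999999999)

def patched_credential():
    # Returned patcher is entered by the caller; create_app() then
    # authenticates with the mock instead of running `azd auth login`
    return mock.patch("app.AzureDeveloperCliCredential", return_value=MockAzureCredential())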
tests.conftest/auth_client
Modified
Azure-Samples~azure-search-openai-demo
2333426632d5f4d30fd733fdbf09b58197589d88
Better support for deploying to non-home tenant (#1964)
<14>:<add> with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential: <del> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_envs)
async def auth_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
<0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
<1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
<2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
<3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
<4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
<5> monkeypatch.setenv("USE_USER_UPLOAD", "true")
<6> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-userstorage-account")
<7> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-userstorage-container")
<8> monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
<9> monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
<10> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-documentintelligence-service")
<11> for key, value in request.param.items():
<12>     monkeypatch.setenv(key, value)
<13>
<14> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
<15>     mock_default_azure_credential.return_value = MockAzureCredential()
<16>     quart_app = app.create_app()
<17>
<18>     async with quart_app.test_app() as test_app:
<19>         quart_app.config.update({"TESTING": True})
<20>         mock</s>
===========below chunk 0===========
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_envs)
async def auth_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
# offset: 1
            mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
            client = test_app.test_client()
            client.config = quart_app.config

            yield client
===========changed ref 0===========
# module: tests.conftest
 @pytest.fixture(params=envs, ids=["client0", "client1"])
 def mock_env(monkeypatch, request):
     with mock.patch.dict(os.environ, clear=True):
         monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
         monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
         monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg")
         monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid")
         monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true")
         monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true")
         monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true")
         monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
         monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
         monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id")
         monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus")
         monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
         monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com")
         for key, value in request.param.items():
             monkeypatch.setenv(key, value)
         if os.getenv("AZURE_USE_AUTHENTICATION") is not None:
             monkeypatch.delenv("AZURE_USE_AUTHENTICATION")

+        with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential:
-        with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
             mock_default_azure_credential.return_value = MockAzureCredential()
             yield
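===========editor example===========
The fixtures above go through quart_app.test_app() rather than calling test_client() directly; the async context manager drives the app's startup/shutdown lifecycle, so the @before_app_serving setup_clients hook actually runs. A distilled sketch of that shape (fixture and module names follow the conftest shown here):

import pytest_asyncio

import app  # the backend package under test

@pytest_asyncio.fixture
async def client(mock_env):
    quart_app = app.create_app()
    async with quart_app.test_app() as test_app:  # runs before/after serving hooks
        yield test_app.test_client()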
tests.conftest/auth_public_documents_client
Modified
Azure-Samples~azure-search-openai-demo
2333426632d5f4d30fd733fdbf09b58197589d88
Better support for deploying to non-home tenant (#1964)
<14>:<add> with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential: <del> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_public_envs)
async def auth_public_documents_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
<0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
<1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
<2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
<3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
<4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
<5> monkeypatch.setenv("USE_USER_UPLOAD", "true")
<6> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-userstorage-account")
<7> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-userstorage-container")
<8> monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
<9> monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
<10> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-documentintelligence-service")
<11> for key, value in request.param.items():
<12>     monkeypatch.setenv(key, value)
<13>
<14> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
<15>     mock_default_azure_credential.return_value = MockAzureCredential()
<16>     quart_app = app.create_app()
<17>
<18>     async with quart_app.test_app() as test_app:
<19>         quart_app.config.update({"TESTING":</s>
===========below chunk 0===========
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_public_envs)
async def auth_public_documents_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
# offset: 1
            mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
            mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
            client = test_app.test_client()
            client.config = quart_app.config

            yield client
===========changed ref 0===========
# module: tests.conftest
 @pytest_asyncio.fixture(params=auth_envs)
 async def auth_client(
     monkeypatch,
     mock_openai_chatcompletion,
     mock_openai_embedding,
     mock_confidential_client_success,
     mock_validate_token_success,
     mock_list_groups_success,
     mock_acs_search_filter,
     request,
 ):
     monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
     monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
     monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
     monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
     monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
     monkeypatch.setenv("USE_USER_UPLOAD", "true")
     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-userstorage-account")
     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-userstorage-container")
     monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
     monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-documentintelligence-service")
     for key, value in request.param.items():
         monkeypatch.setenv(key, value)

+    with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential:
-    with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
         mock_default_azure_credential.return_value = MockAzureCredential()
         quart_app = app.create_app()

         async with quart_app.test_app() as test_app:
             quart_app.config.update({"TESTING":</s>
===========changed ref 1===========
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_envs)
async def auth_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
# offset: 1
<s> with quart_app.test_app() as test_app:
            quart_app.config.update({"TESTING": True})
            mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
            mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT])
            client = test_app.test_client()
            client.config = quart_app.config

            yield client
===========changed ref 2===========
# module: tests.conftest
 @pytest.fixture(params=envs, ids=["client0", "client1"])
 def mock_env(monkeypatch, request):
     with mock.patch.dict(os.environ, clear=True):
         monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
         monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
         monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg")
         monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid")
         monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true")
         monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true")
         monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true")
         monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
         monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
         monkeypatch.setenv("AZURE_SPEECH_SERVICE_ID", "test-id")
         monkeypatch.setenv("AZURE_SPEECH_SERVICE_LOCATION", "eastus")
         monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
         monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com")
         for key, value in request.param.items():
             monkeypatch.setenv(key, value)
         if os.getenv("AZURE_USE_AUTHENTICATION") is not None:
             monkeypatch.delenv("AZURE_USE_AUTHENTICATION")

+        with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential:
-        with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
             mock_default_azure_credential.return_value = MockAzureCredential()
             yield
scripts.adlsgen2setup/AdlsGen2Setup.create_or_get_group
Modified
Azure-Samples~azure-search-openai-demo
8f3abc40b6f5a740ddfdf93515c9db30ddd3c867
Adlsgen2 fixes (#1974)
<18>:<add> "securityEnabled": self.security_enabled_groups, <19>:<add> # If Unified does not work for you, then you may need the following settings instead: <add> # "mailEnabled": False, <add> # "mailNickname": group_name, <del> "securityEnabled": self.security_enabled_groups,
# module: scripts.adlsgen2setup
class AdlsGen2Setup:
    def create_or_get_group(self, group_name: str):
<0> group_id = None
<1> if not self.graph_headers:
<2>     token_result = await self.credentials.get_token("https://graph.microsoft.com/.default")
<3>     self.graph_headers = {"Authorization": f"Bearer {token_result.token}"}
<4> async with aiohttp.ClientSession(headers=self.graph_headers) as session:
<5>     logging.info(f"Searching for group {group_name}...")
<6>     async with session.get(
<7>         f"https://graph.microsoft.com/v1.0/groups?$select=id&$top=1&$filter=displayName eq '{group_name}'"
<8>     ) as response:
<9>         content = await response.json()
<10>        if response.status != 200:
<11>            raise Exception(content)
<12>        if len(content["value"]) == 1:
<13>            group_id = content["value"][0]["id"]
<14>    if not group_id:
<15>        logging.info(f"Could not find group {group_name}, creating...")
<16>        group = {
<17>            "displayName": group_name,
<18>            "groupTypes": ["Unified"],
<19>            "securityEnabled": self.security_enabled_groups,
<20>        }
<21>        async with session.post("https://graph.microsoft.com/v1.0/groups", json=group) as response:
<22>            content = await response.json()
<23>            if response.status != 201:
<24>                raise Exception(content)
<25>            group_id = content["id"]
<26>    logging.info(f"Group {group_name} ID {group_id}")
<27> return group_id
===========unchanged ref 0===========
at: aiohttp.client
    ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=(_default_fallback_charset_resolver))
at: aiohttp.client.ClientSession
    ATTRS = frozenset(["_base_url", "_source_traceback", "_connector", "requote_redirect_url", "_loop", "_cookie_jar", "_connector_owner", "_default_auth", "_version", "_json_serialize", "_requote_redirect_url", "_timeout", "_raise_for_status", "_auto_decompress", "_trust_env", "_default_headers", "_skip_auto_headers", "_request_class", "_response_class", "_ws_response_class", "_trace_configs", "_read_bufsize"])
===========unchanged ref 1===========
    _source_traceback = None  # type: Optional[traceback.StackSummary]
    _connector = None  # type: Optional[BaseConnector]
    get(url: StrOrURL, *, allow_redirects: bool=True, params: Optional[Mapping[str, str]]=None, data: Any=None, json: Any=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, max_redirects: int=10, compress: Optional[str]=None, chunked: Optional[bool]=None, expect100: bool=False, raise_for_status: Optional[bool]=None, read_until_eof: bool=True, proxy: Optional[StrOrURL]=None, proxy_auth: Optional[BasicAuth]=None, timeout: Union[ClientTimeout, object]=sentinel, verify_ssl: Optional[bool]=None, fingerprint: Optional[bytes]=None, ssl_context: Optional[SSLContext]=None, ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None, proxy_headers: Optional[LooseHeaders]=None, trace_request_ctx: Optional[SimpleNamespace]=None, read_bufsize: Optional[int]=None) -> "_RequestContextManager"
===========unchanged ref 2===========
    post(url: StrOrURL, *, data: Any=None, params: Optional[Mapping[str, str]]=None, json: Any=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, allow_redirects: bool=True, max_redirects: int=10, compress: Optional[str]=None, chunked: Optional[bool]=None, expect100: bool=False, raise_for_status: Optional[bool]=None, read_until_eof: bool=True, proxy: Optional[StrOrURL]=None, proxy_auth: Optional[BasicAuth]=None, timeout: Union[ClientTimeout, object]=sentinel, verify_ssl: Optional[bool]=None, fingerprint: Optional[bytes]=None, ssl_context: Optional[SSLContext]=None, ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None, proxy_headers: Optional[LooseHeaders]=None, trace_request_ctx: Optional[SimpleNamespace]=None, read_bufsize: Optional[int]=None) -> "_RequestContextManager"
at: aiohttp.client_reqrep.ClientResponse
    version = None  # HTTP-Version
    status: int = None  # type: ignore[assignment]  # Status-Code
    reason = None  # Reason-Phrase
    content: StreamReader = None  # type: ignore[assignment]  # Payload stream
    _headers: "CIMultiDictProxy[str]" = None  # type: ignore[assignment]
    _raw_headers: RawHeaders = None  # type: ignore[assignment]  # Response raw headers
    _connection = None  # current connection
    _source_traceback: Optional[traceback.StackSummary] = None
    _closed = True  # to allow __del__ for non-initialized properly response
    _released = False
    json(*, encoding: Optional[str]=None, loads: JSONDecoder=DEFAULT_JSON_DECODER, content_type: Optional[str]="application/json") -> Any
===========unchanged ref 3===========
at: aiohttp.client_reqrep.ClientResponse.start
    self.status = message.code
at: logging
    info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scripts.adlsgen2setup.AdlsGen2Setup.__init__
    self.credentials = credentials
    self.security_enabled_groups = security_enabled_groups
    self.graph_headers: Optional[dict[str, str]] = None
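===========editor example===========
The graph_headers attribute above is a small lazy cache: the Microsoft Graph token is fetched once on first use and reused for later requests. A sketch of just that pattern in isolation (illustrative; a long-lived process would also check the token's expires_on before reuse):

from typing import Optional

class GraphHeaderCache:
    def __init__(self, credentials):
        self.credentials = credentials
        self.graph_headers: Optional[dict[str, str]] = None

    async def headers(self) -> dict[str, str]:
        if not self.graph_headers:
            # Fetch a bearer token for Microsoft Graph only on first use
            token = await self.credentials.get_token("https://graph.microsoft.com/.default")
            self.graph_headers = {"Authorization": f"Bearer {token.token}"}
        return self.graph_headers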
scripts.adlsgen2setup/AdlsGen2Setup.run
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<1>:<add> logger.info(f"Ensuring {self.filesystem_name} exists...") <del> logging.info(f"Ensuring {self.filesystem_name} exists...") <6>:<add> logger.info("Creating groups...") <del> logging.info("Creating groups...") <12>:<add> logger.info("Ensuring directories exist...") <del> logging.info("Ensuring directories exist...") <23>:<add> logger.info("Uploading files...") <del> logging.info("Uploading files...") <27>:<add> logger.error(f"File {file} has unknown directory {directory}, exiting...") <del> logging.error(f"File {file} has unknown directory {directory}, exiting...") <33>:<add> logger.info("Setting access control...") <del> logging.info("Setting access control...")
# module: scripts.adlsgen2setup
class AdlsGen2Setup:
    def run(self):
<0> async with self.create_service_client() as service_client:
<1>     logging.info(f"Ensuring {self.filesystem_name} exists...")
<2>     async with service_client.get_file_system_client(self.filesystem_name) as filesystem_client:
<3>         if not await filesystem_client.exists():
<4>             await filesystem_client.create_file_system()
<5>
<6>         logging.info("Creating groups...")
<7>         groups: dict[str, str] = {}
<8>         for group in self.data_access_control_format["groups"]:
<9>             group_id = await self.create_or_get_group(group)
<10>            groups[group] = group_id
<11>
<12>        logging.info("Ensuring directories exist...")
<13>        directories: dict[str, DataLakeDirectoryClient] = {}
<14>        try:
<15>            for directory in self.data_access_control_format["directories"].keys():
<16>                directory_client = (
<17>                    await filesystem_client.create_directory(directory)
<18>                    if directory != "/"
<19>                    else filesystem_client._get_root_directory_client()
<20>                )
<21>                directories[directory] = directory_client
<22>
<23>            logging.info("Uploading files...")
<24>            for file, file_info in self.data_access_control_format["files"].items():
<25>                directory = file_info["directory"]
<26>                if directory not in directories:
<27>                    logging.error(f"File {file} has unknown directory {directory}, exiting...")
<28>                    return
<29>                await self.upload_file(
<30>                    directory_client=directories[directory], file_path=os.path.join(self.data_directory, file)
<31>                )
<32>
<33>            logging.info("Setting access control...")
<34>            for directory, access_control in self.data_access_control_format["directories"].items():
<35>                directory_client = directories[directory]
<36>                if "groups" in access_control:
<37>                    for group_name in</s>
===========below chunk 0===========
# module: scripts.adlsgen2setup
class AdlsGen2Setup:
    def run(self):
# offset: 1
                        if group_name not in groups:
                            logging.error(
                                f"Directory {directory} has unknown group {group_name} in access control list, exiting"
                            )
                            return
                        await directory_client.update_access_control_recursive(
                            acl=f"group:{groups[group_name]}:r-x"
                        )
                    if "oids" in access_control:
                        for oid in access_control["oids"]:
                            await directory_client.update_access_control_recursive(acl=f"user:{oid}:r-x")
        finally:
            for directory_client in directories.values():
                await directory_client.close()
===========unchanged ref 0===========
at: logging.Logger
    info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
    error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: os.path
    join(a: StrPath, *paths: StrPath) -> str
    join(a: BytesPath, *paths: BytesPath) -> bytes
at: scripts.adlsgen2setup
    logger = logging.getLogger("scripts")
at: scripts.adlsgen2setup.AdlsGen2Setup
    create_service_client()
    upload_file(directory_client: DataLakeDirectoryClient, file_path: str)
    create_or_get_group(group_name: str)
    create_or_get_group(self, group_name: str)
at: scripts.adlsgen2setup.AdlsGen2Setup.__init__
    self.data_directory = data_directory
    self.filesystem_name = filesystem_name
at: scripts.adlsgen2setup.AdlsGen2Setup.create_or_get_group
    self.graph_headers = {"Authorization": f"Bearer {token_result.token}"}
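===========editor example===========
The diff hints above swap bare logging.info(...) calls for a module-level named logger. A minimal sketch of the payoff: verbosity can then be scoped to the "scripts" namespace without turning on debug output for every third-party library attached to the root logger (the configuration shown is illustrative):

import logging

logger = logging.getLogger("scripts")  # shared name across the setup scripts

def enable_verbose_output():
    logging.basicConfig()  # attach a root handler so records are printed
    logging.getLogger("scripts").setLevel(logging.DEBUG)  # scoped verbosity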
scripts.adlsgen2setup/AdlsGen2Setup.create_or_get_group
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<5>:<add> logger.info(f"Searching for group {group_name}...") <del> logging.info(f"Searching for group {group_name}...") <15>:<add> logger.info(f"Could not find group {group_name}, creating...") <del> logging.info(f"Could not find group {group_name}, creating...") <29>:<add> logger.info(f"Group {group_name} ID {group_id}") <del> logging.info(f"Group {group_name} ID {group_id}")
# module: scripts.adlsgen2setup
class AdlsGen2Setup:
    def create_or_get_group(self, group_name: str):
<0> group_id = None
<1> if not self.graph_headers:
<2>     token_result = await self.credentials.get_token("https://graph.microsoft.com/.default")
<3>     self.graph_headers = {"Authorization": f"Bearer {token_result.token}"}
<4> async with aiohttp.ClientSession(headers=self.graph_headers) as session:
<5>     logging.info(f"Searching for group {group_name}...")
<6>     async with session.get(
<7>         f"https://graph.microsoft.com/v1.0/groups?$select=id&$top=1&$filter=displayName eq '{group_name}'"
<8>     ) as response:
<9>         content = await response.json()
<10>        if response.status != 200:
<11>            raise Exception(content)
<12>        if len(content["value"]) == 1:
<13>            group_id = content["value"][0]["id"]
<14>    if not group_id:
<15>        logging.info(f"Could not find group {group_name}, creating...")
<16>        group = {
<17>            "displayName": group_name,
<18>            "securityEnabled": self.security_enabled_groups,
<19>            "groupTypes": ["Unified"],
<20>            # If Unified does not work for you, then you may need the following settings instead:
<21>            # "mailEnabled": False,
<22>            # "mailNickname": group_name,
<23>        }
<24>        async with session.post("https://graph.microsoft.com/v1.0/groups", json=group) as response:
<25>            content = await response.json()
<26>            if response.status != 201:
<27>                raise Exception(content)
<28>            group_id = content["id"]
<29>    logging.info(f"Group {group_name} ID {group_id}")
<30> return group_id
===========unchanged ref 0===========
at: aiohttp.client
    ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=(_default_fallback_charset_resolver))
at: aiohttp.client.ClientSession
    ATTRS = frozenset(["_base_url", "_source_traceback", "_connector", "requote_redirect_url", "_loop", "_cookie_jar", "_connector_owner", "_default_auth", "_version", "_json_serialize", "_requote_redirect_url", "_timeout", "_raise_for_status", "_auto_decompress", "_trust_env", "_default_headers", "_skip_auto_headers", "_request_class", "_response_class", "_ws_response_class", "_trace_configs", "_read_bufsize"])
===========unchanged ref 1===========
    _source_traceback = None  # type: Optional[traceback.StackSummary]
    _connector = None  # type: Optional[BaseConnector]
    get(url: StrOrURL, *, allow_redirects: bool=True, params: Optional[Mapping[str, str]]=None, data: Any=None, json: Any=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, max_redirects: int=10, compress: Optional[str]=None, chunked: Optional[bool]=None, expect100: bool=False, raise_for_status: Optional[bool]=None, read_until_eof: bool=True, proxy: Optional[StrOrURL]=None, proxy_auth: Optional[BasicAuth]=None, timeout: Union[ClientTimeout, object]=sentinel, verify_ssl: Optional[bool]=None, fingerprint: Optional[bytes]=None, ssl_context: Optional[SSLContext]=None, ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None, proxy_headers: Optional[LooseHeaders]=None, trace_request_ctx: Optional[SimpleNamespace]=None, read_bufsize: Optional[int]=None) -> "_RequestContextManager"
===========unchanged ref 2===========
    post(url: StrOrURL, *, data: Any=None, params: Optional[Mapping[str, str]]=None, json: Any=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, allow_redirects: bool=True, max_redirects: int=10, compress: Optional[str]=None, chunked: Optional[bool]=None, expect100: bool=False, raise_for_status: Optional[bool]=None, read_until_eof: bool=True, proxy: Optional[StrOrURL]=None, proxy_auth: Optional[BasicAuth]=None, timeout: Union[ClientTimeout, object]=sentinel, verify_ssl: Optional[bool]=None, fingerprint: Optional[bytes]=None, ssl_context: Optional[SSLContext]=None, ssl: Optional[Union[SSLContext, bool, Fingerprint]]=None, proxy_headers: Optional[LooseHeaders]=None, trace_request_ctx: Optional[SimpleNamespace]=None, read_bufsize: Optional[int]=None) -> "_RequestContextManager"
at: aiohttp.client_reqrep.ClientResponse
    version = None  # HTTP-Version
    status: int = None  # type: ignore[assignment]  # Status-Code
    reason = None  # Reason-Phrase
    content: StreamReader = None  # type: ignore[assignment]  # Payload stream
    _headers: "CIMultiDictProxy[str]" = None  # type: ignore[assignment]
    _raw_headers: RawHeaders = None  # type: ignore[assignment]  # Response raw headers
    _connection = None  # current connection
    _source_traceback: Optional[traceback.StackSummary] = None
    _closed = True  # to allow __del__ for non-initialized properly response
    _released = False
    json(*, encoding: Optional[str]=None, loads: JSONDecoder=DEFAULT_JSON_DECODER, content_type: Optional[str]="application/json") -> Any
===========unchanged ref 3===========
at: aiohttp.client_reqrep.ClientResponse.start
    self.status = message.code
at: logging.Logger
    info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: os.path
    basename(p: _PathLike[AnyStr]) -> AnyStr
    basename(p: AnyStr) -> AnyStr
at: scripts.adlsgen2setup
    logger = logging.getLogger("scripts")
at: scripts.adlsgen2setup.AdlsGen2Setup.__init__
    self.credentials = credentials
    self.security_enabled_groups = security_enabled_groups
    self.graph_headers: Optional[dict[str, str]] = None
scripts.adlsgen2setup/main
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<0>:<add> load_azd_env()
<add> 
<add> if not os.getenv("AZURE_ADLS_GEN2_STORAGE_ACCOUNT"):
<add>     raise Exception("AZURE_ADLS_GEN2_STORAGE_ACCOUNT must be set to continue")
<add> 
<5>:<add> storage_account_name=os.environ["AZURE_ADLS_GEN2_STORAGE_ACCOUNT"],
<del> storage_account_name=args.storage_account,
# module: scripts.adlsgen2setup
def main(args: Any):
<0> async with AzureDeveloperCliCredential() as credentials:
<1>     with open(args.data_access_control) as f:
<2>         data_access_control_format = json.load(f)
<3>     command = AdlsGen2Setup(
<4>         data_directory=args.data_directory,
<5>         storage_account_name=args.storage_account,
<6>         filesystem_name="gptkbcontainer",
<7>         security_enabled_groups=args.create_security_enabled_groups,
<8>         credentials=credentials,
<9>         data_access_control_format=data_access_control_format,
<10>    )
<11>    await command.run()
===========unchanged ref 0===========
at: json
    load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
at: load_azd_env
    load_azd_env()
at: logging.Logger
    info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: os
    getenv(key: str, default: _T) -> Union[str, _T]
    getenv(key: str) -> Optional[str]
at: scripts.adlsgen2setup
    logger = logging.getLogger("scripts")
at: scripts.adlsgen2setup.AdlsGen2Setup.create_or_get_group
    group_id = content["value"][0]["id"]
    group_id = None
    group_id = content["id"]
===========changed ref 0===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def create_or_get_group(self, group_name: str):
         group_id = None
         if not self.graph_headers:
             token_result = await self.credentials.get_token("https://graph.microsoft.com/.default")
             self.graph_headers = {"Authorization": f"Bearer {token_result.token}"}
         async with aiohttp.ClientSession(headers=self.graph_headers) as session:
+            logger.info(f"Searching for group {group_name}...")
-            logging.info(f"Searching for group {group_name}...")
             async with session.get(
                 f"https://graph.microsoft.com/v1.0/groups?$select=id&$top=1&$filter=displayName eq '{group_name}'"
             ) as response:
                 content = await response.json()
                 if response.status != 200:
                     raise Exception(content)
                 if len(content["value"]) == 1:
                     group_id = content["value"][0]["id"]
             if not group_id:
+                logger.info(f"Could not find group {group_name}, creating...")
-                logging.info(f"Could not find group {group_name}, creating...")
                 group = {
                     "displayName": group_name,
                     "securityEnabled": self.security_enabled_groups,
                     "groupTypes": ["Unified"],
                     # If Unified does not work for you, then you may need the following settings instead:
                     # "mailEnabled": False,
                     # "mailNickname": group_name,
                 }
                 async with session.post("https://graph.microsoft.com/v1.0/groups", json=group) as response:
                     content = await response.json()
                     if response.status != 201:
                         raise Exception(content)
                     group_id = content["id"]
+            logger.info(f"Group {group_name} ID {group_id}")
-            logging.info(f"Group {group_name} ID {group_id</s>
===========changed ref 1===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def create_or_get_group(self, group_name: str):
# offset: 1
<s>_name} ID {group_id}")
-            logging.info(f"Group {group_name} ID {group_id}")
         return group_id
===========changed ref 2===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def run(self):
         async with self.create_service_client() as service_client:
+            logger.info(f"Ensuring {self.filesystem_name} exists...")
-            logging.info(f"Ensuring {self.filesystem_name} exists...")
             async with service_client.get_file_system_client(self.filesystem_name) as filesystem_client:
                 if not await filesystem_client.exists():
                     await filesystem_client.create_file_system()

+                logger.info("Creating groups...")
-                logging.info("Creating groups...")
                 groups: dict[str, str] = {}
                 for group in self.data_access_control_format["groups"]:
                     group_id = await self.create_or_get_group(group)
                     groups[group] = group_id

+                logger.info("Ensuring directories exist...")
-                logging.info("Ensuring directories exist...")
                 directories: dict[str, DataLakeDirectoryClient] = {}
                 try:
                     for directory in self.data_access_control_format["directories"].keys():
                         directory_client = (
                             await filesystem_client.create_directory(directory)
                             if directory != "/"
                             else filesystem_client._get_root_directory_client()
                         )
                         directories[directory] = directory_client

+                    logger.info("Uploading files...")
-                    logging.info("Uploading files...")
                     for file, file_info in self.data_access_control_format["files"].items():
                         directory = file_info["directory"]
                         if directory not in directories:
+                            logger.error(f"File {file} has unknown directory {directory}, exiting...")
-                            logging.error(f"File {file} has unknown directory {directory}, exiting...")
                             return
                         await self.upload_file(
                             directory_client=directories[directory], file_path=os.path.join(self.data_directory, file)
                         )

+                    logger.info("Setting access control...")
- </s>
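===========editor example===========
load_azd_env() is called in the diffs above but its body is not part of this record. A plausible minimal sketch (purely illustrative, not the repo's actual helper) reads the selected azd environment's values into os.environ, assuming the azd CLI is installed and that `azd env get-values` prints KEY="value" lines:

import os
import subprocess

def load_azd_env() -> None:
    output = subprocess.run(
        ["azd", "env", "get-values"], capture_output=True, text=True, check=True
    ).stdout
    for line in output.splitlines():
        key, sep, raw = line.partition("=")
        if sep:
            # Keep any value already set in the shell; strip surrounding quotes
            os.environ.setdefault(key, raw.strip().strip('"'))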
scripts.manageacl/main
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<0>:<add> load_azd_env()
<add> 
<11>:<add> service_name=os.environ["AZURE_SEARCH_SERVICE"],
<add> index_name=os.environ["AZURE_SEARCH_INDEX"],
<del> service_name=args.search_service,
<12>:<del> index_name=args.index,
# module: scripts.manageacl
def main(args: Any):
<0> # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
<1> azd_credential = (
<2>     AzureDeveloperCliCredential()
<3>     if args.tenant_id is None
<4>     else AzureDeveloperCliCredential(tenant_id=args.tenant_id, process_timeout=60)
<5> )
<6> search_credential: Union[AsyncTokenCredential, AzureKeyCredential] = azd_credential
<7> if args.search_key is not None:
<8>     search_credential = AzureKeyCredential(args.search_key)
<9>
<10> command = ManageAcl(
<11>     service_name=args.search_service,
<12>     index_name=args.index,
<13>     url=args.url,
<14>     acl_action=args.acl_action,
<15>     acl_type=args.acl_type,
<16>     acl=args.acl,
<17>     credentials=search_credential,
<18> )
<19> await command.run()
===========unchanged ref 0===========
at: load_azd_env
    load_azd_env()
at: logging.Logger
    info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: os
    environ = _createenviron()
at: scripts.manageacl
    logger = logging.getLogger("scripts")
    ManageAcl(service_name: str, index_name: str, url: str, acl_action: str, acl_type: str, acl: str, credentials: Union[AsyncTokenCredential, AzureKeyCredential])
===========changed ref 0===========
# module: scripts.manageacl
+ logger = logging.getLogger("scripts")
- logger = logging.getLogger("manageacl")
===========changed ref 1===========
# module: scripts.adlsgen2setup
 def main(args: Any):
+    load_azd_env()
+
+    if not os.getenv("AZURE_ADLS_GEN2_STORAGE_ACCOUNT"):
+        raise Exception("AZURE_ADLS_GEN2_STORAGE_ACCOUNT must be set to continue")
+
     async with AzureDeveloperCliCredential() as credentials:
         with open(args.data_access_control) as f:
             data_access_control_format = json.load(f)
         command = AdlsGen2Setup(
             data_directory=args.data_directory,
+            storage_account_name=os.environ["AZURE_ADLS_GEN2_STORAGE_ACCOUNT"],
-            storage_account_name=args.storage_account,
             filesystem_name="gptkbcontainer",
             security_enabled_groups=args.create_security_enabled_groups,
             credentials=credentials,
             data_access_control_format=data_access_control_format,
         )
         await command.run()
===========changed ref 2===========
# module: scripts.adlsgen2setup
+ logger = logging.getLogger("scripts")
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Upload sample data to a Data Lake Storage Gen2 account and associate sample access control lists with it using sample groups",
+        epilog="Example: ./scripts/adlsgen2setup.py ./data --data-access-control ./scripts/sampleacls.json --create-security-enabled-groups <true|false>",
-        epilog="Example: ./scripts/adlsgen2setup.py ./data --data-access-control ./scripts/sampleacls.json --storage-account <name of storage account> --create-security-enabled-groups <true|false>",
     )
     parser.add_argument("data_directory", help="Data directory that contains sample PDFs")
-    parser.add_argument(
-        "--storage-account",
-        required=True,
-        help="Name of the Data Lake Storage Gen2 account to upload the sample data to",
-    )
     parser.add_argument(
         "--create-security-enabled-groups",
         required=False,
         action="store_true",
         help="Whether or not the sample groups created are security enabled in Microsoft Entra",
     )
     parser.add_argument(
         "--data-access-control", required=True, help="JSON file describing access control for the sample data"
     )
     parser.add_argument("--verbose", "-v", required=False, action="store_true", help="Verbose output")
     args = parser.parse_args()
     if args.verbose:
         logging.basicConfig()
         logging.getLogger().setLevel(logging.INFO)
     asyncio.run(main(args))
===========changed ref 3===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def create_or_get_group(self, group_name: str):
         group_id = None
         if not self.graph_headers:
             token_result = await self.credentials.get_token("https://graph.microsoft.com/.default")
             self.graph_headers = {"Authorization": f"Bearer {token_result.token}"}
         async with aiohttp.ClientSession(headers=self.graph_headers) as session:
+            logger.info(f"Searching for group {group_name}...")
-            logging.info(f"Searching for group {group_name}...")
             async with session.get(
                 f"https://graph.microsoft.com/v1.0/groups?$select=id&$top=1&$filter=displayName eq '{group_name}'"
             ) as response:
                 content = await response.json()
                 if response.status != 200:
                     raise Exception(content)
                 if len(content["value"]) == 1:
                     group_id = content["value"][0]["id"]
             if not group_id:
+                logger.info(f"Could not find group {group_name}, creating...")
-                logging.info(f"Could not find group {group_name}, creating...")
                 group = {
                     "displayName": group_name,
                     "securityEnabled": self.security_enabled_groups,
                     "groupTypes": ["Unified"],
                     # If Unified does not work for you, then you may need the following settings instead:
                     # "mailEnabled": False,
                     # "mailNickname": group_name,
                 }
                 async with session.post("https://graph.microsoft.com/v1.0/groups", json=group) as response:
                     content = await response.json()
                     if response.status != 201:
                         raise Exception(content)
                     group_id = content["id"]
+            logger.info(f"Group {group_name} ID {group_id}")
-            logging.info(f"Group {group_name} ID {group_id</s>
===========changed ref 4===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def create_or_get_group(self, group_name: str):
# offset: 1
<s>_name} ID {group_id}")
-            logging.info(f"Group {group_name} ID {group_id}")
         return group_id
===========changed ref 5===========
# module: scripts.adlsgen2setup
 class AdlsGen2Setup:
     def run(self):
         async with self.create_service_client() as service_client:
+            logger.info(f"Ensuring {self.filesystem_name} exists...")
-            logging.info(f"Ensuring {self.filesystem_name} exists...")
             async with service_client.get_file_system_client(self.filesystem_name) as filesystem_client:
                 if not await filesystem_client.exists():
                     await filesystem_client.create_file_system()

+                logger.info("Creating groups...")
-                logging.info("Creating groups...")
                 groups: dict[str, str] = {}
                 for group in self.data_access_control_format["groups"]:
                     group_id = await self.create_or_get_group(group)
                     groups[group] = group_id

+                logger.info("Ensuring directories exist...")
-                logging.info("Ensuring directories exist...")
                 directories: dict[str, DataLakeDirectoryClient] = {}
                 try:
                     for directory in self.data_access_control_format["directories"].keys():
                         directory_client = (
                             await filesystem_client.create_directory(directory)
                             if directory != "/"
                             else filesystem_client._get_root_directory_client()
                         )
                         directories[directory] = directory_client

+                    logger.info("Uploading files...")
-                    logging.info("Uploading files...")
                     for file, file_info in self.data_access_control_format["files"].items():
                         directory = file_info["directory"]
                         if directory not in directories:
+                            logger.error(f"File {file} has unknown directory {directory}, exiting...")
-                            logging.error(f"File {file} has unknown directory {directory}, exiting...")
                             return
                         await self.upload_file(
                             directory_client=directories[directory], file_path=os.path.join(self.data_directory, file)
                         )

+                    logger.info("Setting access control...")
- </s>
tests.test_app_config/test_app_config_semanticranker_free
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<10>:<add> assert result["showUserUpload"] is False
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_config_semanticranker_free(monkeypatch, minimal_env):
<0> monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "free")
<1> quart_app = app.create_app()
<2> async with quart_app.test_app() as test_app:
<3>     client = test_app.test_client()
<4>     response = await client.get("/config")
<5>     assert response.status_code == 200
<6>     result = await response.get_json()
<7>     assert result["showGPT4VOptions"] is False
<8>     assert result["showSemanticRankerOption"] is True
<9>     assert result["showVectorOption"] is True
<10>
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.monkeypatch
    monkeypatch() -> Generator["MonkeyPatch", None, None]
at: tests.test_app_config.test_app_user_upload_processors_docint_localpdf
    ingester = quart_app.config[app.CONFIG_INGESTER]
===========changed ref 0===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 5
+ 
===========changed ref 1===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors_docint(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 14
+ 
===========changed ref 2===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors_docint_localpdf(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
+     monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 14
+         assert ingester.file_processors[".pdf"] is not ingester.file_processors[".pptx"]
+ 
===========changed ref 3===========
# module: scripts.manageacl
+ logger = logging.getLogger("scripts")
- logger = logging.getLogger("manageacl")
===========changed ref 4===========
# module: scripts.adlsgen2setup
 def main(args: Any):
+    load_azd_env()
+
+    if not os.getenv("AZURE_ADLS_GEN2_STORAGE_ACCOUNT"):
+        raise Exception("AZURE_ADLS_GEN2_STORAGE_ACCOUNT must be set to continue")
+
     async with AzureDeveloperCliCredential() as credentials:
         with open(args.data_access_control) as f:
             data_access_control_format = json.load(f)
         command = AdlsGen2Setup(
             data_directory=args.data_directory,
+            storage_account_name=os.environ["AZURE_ADLS_GEN2_STORAGE_ACCOUNT"],
-            storage_account_name=args.storage_account,
             filesystem_name="gptkbcontainer",
             security_enabled_groups=args.create_security_enabled_groups,
             credentials=credentials,
             data_access_control_format=data_access_control_format,
         )
         await command.run()
===========changed ref 5===========
# module: scripts.manageacl
 def main(args: Any):
+    load_azd_env()
+
     # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
     azd_credential = (
         AzureDeveloperCliCredential()
         if args.tenant_id is None
         else AzureDeveloperCliCredential(tenant_id=args.tenant_id, process_timeout=60)
     )
     search_credential: Union[AsyncTokenCredential, AzureKeyCredential] = azd_credential
     if args.search_key is not None:
         search_credential = AzureKeyCredential(args.search_key)

     command = ManageAcl(
+        service_name=os.environ["AZURE_SEARCH_SERVICE"],
+        index_name=os.environ["AZURE_SEARCH_INDEX"],
-        service_name=args.search_service,
-        index_name=args.index,
         url=args.url,
         acl_action=args.acl_action,
         acl_type=args.acl_type,
         acl=args.acl,
         credentials=search_credential,
     )
     await command.run()
===========changed ref 6===========
# module: scripts.adlsgen2setup
+ logger = logging.getLogger("scripts")
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Upload sample data to a Data Lake Storage Gen2 account and associate sample access control lists with it using sample groups",
+        epilog="Example: ./scripts/adlsgen2setup.py ./data --data-access-control ./scripts/sampleacls.json --create-security-enabled-groups <true|false>",
-        epilog="Example: ./scripts/adlsgen2setup.py ./data --data-access-control ./scripts/sampleacls.json --storage-account <name of storage account> --create-security-enabled-groups <true|false>",
     )
     parser.add_argument("data_directory", help="Data directory that contains sample PDFs")
-    parser.add_argument(
-        "--storage-account",
-        required=True,
-        help="Name of the Data Lake Storage Gen2 account to upload the sample data to",
-    )
     parser.add_argument(
         "--create-security-enabled-groups",
         required=False,
         action="store_true",
         help="Whether or not the sample groups created are security enabled in Microsoft Entra",
     )
     parser.add_argument(
         "--data-access-control", required=True, help="JSON file describing access control for the sample data"
     )
     parser.add_argument("--verbose", "-v", required=False, action="store_true", help="Verbose output")
     args = parser.parse_args()
     if args.verbose:
         logging.basicConfig()
         logging.getLogger().setLevel(logging.INFO)
     asyncio.run(main(args))
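===========editor example===========
The new assertion lines up with the /config route shown in earlier context: the backend surfaces feature flags straight from app config, and minimal_env leaves user upload off. A stripped-down sketch of that route shape (a literal config key is used here for brevity; the app uses named constants):

from quart import Quart, jsonify

app = Quart(__name__)
app.config["USER_UPLOAD_ENABLED"] = False  # what minimal_env implies

@app.route("/config")
async def config():
    return jsonify({"showUserUpload": app.config["USER_UPLOAD_ENABLED"]})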
tests.test_app_config/test_app_config_semanticranker_disabled
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<10>:<add> assert result["showUserUpload"] is False
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_config_semanticranker_disabled(monkeypatch, minimal_env):
<0> monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "disabled")
<1> quart_app = app.create_app()
<2> async with quart_app.test_app() as test_app:
<3>     client = test_app.test_client()
<4>     response = await client.get("/config")
<5>     assert response.status_code == 200
<6>     result = await response.get_json()
<7>     assert result["showGPT4VOptions"] is False
<8>     assert result["showSemanticRankerOption"] is False
<9>     assert result["showVectorOption"] is True
<10>
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)
at: tests.test_app_config.test_app_user_upload_processors_docint_localhtml
    ingester = quart_app.config[app.CONFIG_INGESTER]
===========changed ref 0===========
# module: tests.test_app_config
 @pytest.mark.asyncio
 async def test_app_config_semanticranker_free(monkeypatch, minimal_env):
     monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "free")
     quart_app = app.create_app()
     async with quart_app.test_app() as test_app:
         client = test_app.test_client()
         response = await client.get("/config")
         assert response.status_code == 200
         result = await response.get_json()
         assert result["showGPT4VOptions"] is False
         assert result["showSemanticRankerOption"] is True
         assert result["showVectorOption"] is True
+        assert result["showUserUpload"] is False
===========changed ref 1===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 5
+ 
===========changed ref 2===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors_docint(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 14
+ 
===========changed ref 3===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors_docint_localhtml(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
+     monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 14
+         assert ingester.file_processors[".html"] is not ingester.file_processors[".pptx"]
+ 
===========changed ref 4===========
# module: tests.test_app_config
+ @pytest.mark.asyncio
+ async def test_app_user_upload_processors_docint_localpdf(monkeypatch, minimal_env):
+     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
+     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
+     monkeypatch.setenv("USE_USER_UPLOAD", "true")
+     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
+     monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
+ 
+     quart_app = app.create_app()
+     async with quart_app.test_app():
+         ingester = quart_app.config[app.CONFIG_INGESTER]
+         assert ingester is not None
+         assert len(ingester.file_processors.keys()) == 14
+         assert ingester.file_processors[".pdf"] is not ingester.file_processors[".pptx"]
+ 
===========changed ref 5===========
# module: scripts.manageacl
+ logger = logging.getLogger("scripts")
- logger = logging.getLogger("manageacl")
===========changed ref 6===========
# module: scripts.adlsgen2setup
 def main(args: Any):
+    load_azd_env()
+
+    if not os.getenv("AZURE_ADLS_GEN2_STORAGE_ACCOUNT"):
+        raise Exception("AZURE_ADLS_GEN2_STORAGE_ACCOUNT must be set to continue")
+
     async with AzureDeveloperCliCredential() as credentials:
         with open(args.data_access_control) as f:
             data_access_control_format = json.load(f)
         command = AdlsGen2Setup(
             data_directory=args.data_directory,
+            storage_account_name=os.environ["AZURE_ADLS_GEN2_STORAGE_ACCOUNT"],
-            storage_account_name=args.storage_account,
             filesystem_name="gptkbcontainer",
             security_enabled_groups=args.create_security_enabled_groups,
             credentials=credentials,
             data_access_control_format=data_access_control_format,
         )
         await command.run()
===========changed ref 7===========
# module: scripts.manageacl
 def main(args: Any):
+    load_azd_env()
+
     # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
     azd_credential = (
         AzureDeveloperCliCredential()
         if args.tenant_id is None
         else AzureDeveloperCliCredential(tenant_id=args.tenant_id, process_timeout=60)
     )
     search_credential: Union[AsyncTokenCredential, AzureKeyCredential] = azd_credential
     if args.search_key is not None:
         search_credential = AzureKeyCredential(args.search_key)

     command = ManageAcl(
+        service_name=os.environ["AZURE_SEARCH_SERVICE"],
+        index_name=os.environ["AZURE_SEARCH_INDEX"],
-        service_name=args.search_service,
-        index_name=args.index,
         url=args.url,
         acl_action=args.acl_action,
         acl_type=args.acl_type,
         acl=args.acl,
         credentials=search_credential,
     )
     await command.run()
app.backend.prepdocs/setup_file_processors
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<0>:<del> html_parser: Parser
<1>:<del> pdf_parser: Parser
<2>:<del> doc_int_parser: DocumentAnalysisParser
<3>:<add> sentence_text_splitter = SentenceTextSplitter(has_image_embeddings=search_images)
<4>:<add> doc_int_parser: Optional[DocumentAnalysisParser] = None
<13>:<add> 
<add> pdf_parser: Optional[Parser] = None
<15>:<add> elif document_intelligence_service is not None:
<add>     pdf_parser = doc_int_parser
<16>:<add> logger.warning("No PDF parser available")
<add> 
<add> html_parser: Optional[Parser] = None
<del> pdf_parser = doc_int_parser
<19>:<add> elif document_intelligence_service is not None:
<add>     html_parser = doc_int_parser
<20>:<del> html_parser = doc_int_parser
<21>:<del> sentence_text_splitter = SentenceTextSplitter(has_image_embeddings=search_images)
<22>:<del> return {
<23>:<del> ".pdf": FileProcessor(pdf_parser, sentence_text_splitter),
<24>:<del> ".html": FileProcessor(html_parser, sentence_text_splitter),
<25>:<add> logger.warning("No HTML parser available")
<add> 
<add> # These file formats can always be parsed:
<add> file_processors = {
<26>:<del> ".docx": FileProcessor(doc_int_parser, sentence_text_splitter),
<27>:<del> ".pptx": FileProcessor(doc_int_parser, sentence_text_splitter),
# module: app.backend.prepdocs
def setup_file_processors(
    azure_credential: AsyncTokenCredential,
    document_intelligence_service: Union[str, None],
    document_intelligence_key: Union[str, None] = None,
    local_pdf_parser: bool = False,
    local_html_parser: bool = False,
    search_images: bool = False,
):
<0> html_parser: Parser
<1> pdf_parser: Parser
<2> doc_int_parser: DocumentAnalysisParser
<3>
<4> # check if Azure Document Intelligence credentials are provided
<5> if document_intelligence_service is not None:
<6>     documentintelligence_creds: Union[AsyncTokenCredential, AzureKeyCredential] = (
<7>         azure_credential if document_intelligence_key is None else AzureKeyCredential(document_intelligence_key)
<8>     )
<9>     doc_int_parser = DocumentAnalysisParser(
<10>        endpoint=f"https://{document_intelligence_service}.cognitiveservices.azure.com/",
<11>        credential=documentintelligence_creds,
<12>    )
<13> if local_pdf_parser or document_intelligence_service is None:
<14>     pdf_parser = LocalPdfParser()
<15> else:
<16>     pdf_parser = doc_int_parser
<17> if local_html_parser or document_intelligence_service is None:
<18>     html_parser = LocalHTMLParser()
<19> else:
<20>     html_parser = doc_int_parser
<21> sentence_text_splitter = SentenceTextSplitter(has_image_embeddings=search_images)
<22> return {
<23>     ".pdf": FileProcessor(pdf_parser, sentence_text_splitter),
<24>     ".html": FileProcessor(html_parser, sentence_text_splitter),
<25>     ".json": FileProcessor(JsonParser(), SimpleTextSplitter()),
<26>     ".docx": FileProcessor(doc_int_parser, sentence_text_splitter),
<27>     ".pptx": FileProcessor(doc_int_parser, sentence_text_splitter),</s>
===========below chunk 0=========== # module: app.backend.prepdocs def setup_file_processors( azure_credential: AsyncTokenCredential, document_intelligence_service: Union[str, None], document_intelligence_key: Union[str, None] = None, local_pdf_parser: bool = False, local_html_parser: bool = False, search_images: bool = False, ): # offset: 1 ".png": FileProcessor(doc_int_parser, sentence_text_splitter), ".jpg": FileProcessor(doc_int_parser, sentence_text_splitter), ".jpeg": FileProcessor(doc_int_parser, sentence_text_splitter), ".tiff": FileProcessor(doc_int_parser, sentence_text_splitter), ".bmp": FileProcessor(doc_int_parser, sentence_text_splitter), ".heic": FileProcessor(doc_int_parser, sentence_text_splitter), ".md": FileProcessor(TextParser(), sentence_text_splitter), ".txt": FileProcessor(TextParser(), sentence_text_splitter), } ===========unchanged ref 0=========== at: app.backend.prepdocs logger = logging.getLogger("scripts") at: logging.Logger warning(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: prepdocslib.fileprocessor FileProcessor(parser: Parser, splitter: TextSplitter) at: prepdocslib.htmlparser LocalHTMLParser() at: prepdocslib.jsonparser JsonParser() at: prepdocslib.parser Parser() at: prepdocslib.pdfparser LocalPdfParser() DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout") at: prepdocslib.textparser TextParser() at: prepdocslib.textsplitter SentenceTextSplitter(has_image_embeddings: bool, max_tokens_per_section: int=500) SimpleTextSplitter(max_object_length: int=1000) ===========changed ref 0=========== # module: app.backend.prepdocs + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 1=========== # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 2=========== # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: scripts.manageacl + logger = logging.getLogger("scripts") - logger = logging.getLogger("manageacl") ===========changed ref 4=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 5 + ===========changed ref 5=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_disabled(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "disabled") quart_app = app.create_app() async with quart_app.test_app() as test_app: client = test_app.test_client() response = await client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is False assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 6=========== # module: 
tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_free(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "free") quart_app = app.create_app() async with quart_app.test_app() as test_app: client = test_app.test_client() response = await client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is True assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 7=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors_docint(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 14 + ===========changed ref 8=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_config_user_upload_bad_openai_config(monkeypatch, minimal_env): + """Check that this combo works correctly with prepdocs.py embedding service.""" + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + monkeypatch.setenv("OPENAI_HOST", "openai") + quart_app = app.create_app() + with pytest.raises( + quart.testing.app.LifespanError, match="OpenAI key is required when using the non-Azure OpenAI API" + ): + async with quart_app.test_app() as test_app: + test_app.test_client() +
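The fallback rule in setup_file_processors is the key behavior here: Azure Document Intelligence is preferred for PDF and HTML when a service name is configured, and the pure-Python parsers are used otherwise (or when explicitly forced). A minimal, self-contained sketch of that selection logic — the parser classes below are trivial stand-ins for the real prepdocslib implementations:

from typing import Optional


class Parser:
    """Stand-in for prepdocslib.parser.Parser."""


class LocalPdfParser(Parser):
    """Stand-in for the pypdf-based local parser."""


class DocumentAnalysisParser(Parser):
    """Stand-in for the Document Intelligence parser."""

    def __init__(self, endpoint: str):
        self.endpoint = endpoint


def pick_pdf_parser(document_intelligence_service: Optional[str], local_pdf_parser: bool = False) -> Parser:
    # Mirrors setup_file_processors: fall back to the local parser when it is
    # explicitly requested or when no service name is configured.
    if local_pdf_parser or document_intelligence_service is None:
        return LocalPdfParser()
    return DocumentAnalysisParser(
        endpoint=f"https://{document_intelligence_service}.cognitiveservices.azure.com/"
    )


assert isinstance(pick_pdf_parser(None), LocalPdfParser)
assert isinstance(pick_pdf_parser("mysvc"), DocumentAnalysisParser)

The same rule explains the differing processor counts asserted in the tests above: formats such as .docx, .pptx, and the image types are only parseable through Document Intelligence, so the extension map is much larger when the service is configured.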
scripts.auth_init/main
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<0>:<add> load_azd_env() <add> <4>:<add> auth_tenant = os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID")) <del> auth_tenant = os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"]) <5>:<add> if not auth_tenant: <add> print( <add> "Error: No tenant ID set for authentication. Run `azd env set AZURE_AUTH_TENANT_ID tenant-id` to set the tenant ID." <add> ) <add> exit(1)
# module: scripts.auth_init def main(): <0> if not test_authentication_enabled(): <1> print("Not setting up authentication.") <2> exit(0) <3> <4> auth_tenant = os.getenv("AZURE_AUTH_TENANT_ID", os.environ["AZURE_TENANT_ID"]) <5> print("Setting up authentication for tenant", auth_tenant) <6> credential = AzureDeveloperCliCredential(tenant_id=auth_tenant) <7> <8> scopes = ["https://graph.microsoft.com/.default"] <9> graph_client = GraphServiceClient(credentials=credential, scopes=scopes) <10> <11> app_identifier = random_app_identifier() <12> server_object_id, server_app_id, _ = await create_or_update_application_with_secret( <13> graph_client, <14> app_id_env_var="AZURE_SERVER_APP_ID", <15> app_secret_env_var="AZURE_SERVER_APP_SECRET", <16> request_app=server_app_initial(app_identifier), <17> ) <18> print("Setting up server application permissions...") <19> server_app_permission = server_app_permission_setup(server_app_id) <20> await graph_client.applications.by_application_id(server_object_id).patch(server_app_permission) <21> <22> _, client_app_id, _ = await create_or_update_application_with_secret( <23> graph_client, <24> app_id_env_var="AZURE_CLIENT_APP_ID", <25> app_secret_env_var="AZURE_CLIENT_APP_SECRET", <26> request_app=client_app(server_app_id, server_app_permission, app_identifier), <27> ) <28> <29> print("Setting up server known client applications...") <30> await graph_client.applications.by_application_id(server_object_id).patch( <31> server_app_known_client_application(client_app_id) <32> ) <33> print("Authentication setup complete.") <34>
===========unchanged ref 0=========== at: auth_common test_authentication_enabled() at: load_azd_env load_azd_env() at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] at: scripts.auth_init create_or_update_application_with_secret(graph_client: GraphServiceClient, app_id_env_var: str, app_secret_env_var: str, request_app: Application) -> Tuple[str, str, bool] random_app_identifier() server_app_initial(identifier: int) -> Application server_app_permission_setup(server_app_id: str) -> Application ===========changed ref 0=========== + # module: scripts.load_azd_env + + ===========changed ref 1=========== + # module: scripts.load_azd_env + logger = logging.getLogger("scripts") + ===========changed ref 2=========== # module: app.backend.prepdocslib.embeddings + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: app.backend.prepdocslib.searchmanager + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: app.backend.prepdocs + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: scripts.manageacl + logger = logging.getLogger("scripts") - logger = logging.getLogger("manageacl") ===========changed ref 10=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 5 + ===========changed ref 11=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_disabled(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "disabled") quart_app = app.create_app() async with quart_app.test_app() as test_app: client = test_app.test_client() response = await client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is False assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 12=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_free(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "free") quart_app = app.create_app() async with quart_app.test_app() as test_app: client = test_app.test_client() response = await 
client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is True assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 13=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors_docint(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 14 + ===========changed ref 14=========== + # module: scripts.load_azd_env + def load_azd_env(): + """Get path to current azd env file and load file using python-dotenv""" + result = subprocess.run("azd env list -o json", shell=True, capture_output=True, text=True) + if result.returncode != 0: + raise Exception("Error loading azd env") + env_json = json.loads(result.stdout) + env_file_path = None + for entry in env_json: + if entry["IsDefault"]: + env_file_path = entry["DotEnvPath"] + if not env_file_path: + raise Exception("No default azd env file found") + logger.info(f"Loading azd env from {env_file_path}") + load_dotenv(env_file_path, override=True) + ===========changed ref 15=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_config_user_upload_bad_openai_config(monkeypatch, minimal_env): + """Check that this combo works correctly with prepdocs.py embedding service.""" + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + monkeypatch.setenv("OPENAI_HOST", "openai") + quart_app = app.create_app() + with pytest.raises( + quart.testing.app.LifespanError, match="OpenAI key is required when using the non-Azure OpenAI API" + ): + async with quart_app.test_app() as test_app: + test_app.test_client() +
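Two small but deliberate patterns show up in this refactor. First, load_azd_env() is called before anything reads the environment, so the script picks up the current azd environment file. Second, the tenant lookup changes from os.environ["AZURE_TENant_ID".upper()]-style hard indexing (which fails with a bare KeyError) to a chained os.getenv plus an explicit, actionable error message. The guard in isolation — the function name is illustrative:

import os
import sys


def resolve_auth_tenant() -> str:
    # Prefer the auth-specific tenant, fall back to the deployment tenant.
    tenant = os.getenv("AZURE_AUTH_TENANT_ID", os.getenv("AZURE_TENANT_ID"))
    if not tenant:
        # Exit with guidance instead of an unhandled KeyError traceback.
        print(
            "Error: No tenant ID set for authentication. "
            "Run `azd env set AZURE_AUTH_TENANT_ID tenant-id` to set the tenant ID."
        )
        sys.exit(1)
    return tenant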
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
b8f0a740f5766f902b4cee3417651d3a5ee04306
Refactor scripts to avoid anti-patterns, redundancy (#1986)
<18>:<add> # Set our own logger levels to INFO by default <del> # Set the app logger level to INFO by default <19>:<add> app_level = os.getenv("APP_LOG_LEVEL", "INFO") <del> default_level = "INFO" <20>:<add> app.logger.setLevel(os.getenv("APP_LOG_LEVEL", app_level)) <del> app.logger.setLevel(os.getenv("APP_LOG_LEVEL", default_level)) <21>:<add> logging.getLogger("scripts").setLevel(app_level)
# module: app.backend.app def create_app(): <0> app = Quart(__name__) <1> app.register_blueprint(bp) <2> <3> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <4> app.logger.info("APPLICATIONINSIGHTS_CONNECTION_STRING is set, enabling Azure Monitor") <5> configure_azure_monitor() <6> # This tracks HTTP requests made by aiohttp: <7> AioHttpClientInstrumentor().instrument() <8> # This tracks HTTP requests made by httpx: <9> HTTPXClientInstrumentor().instrument() <10> # This tracks OpenAI SDK requests: <11> OpenAIInstrumentor().instrument() <12> # This middleware tracks app route requests: <13> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[assignment] <14> <15> # Log levels should be one of https://docs.python.org/3/library/logging.html#logging-levels <16> # Set root level to WARNING to avoid seeing overly verbose logs from SDKs <17> logging.basicConfig(level=logging.WARNING) <18> # Set the app logger level to INFO by default <19> default_level = "INFO" <20> app.logger.setLevel(os.getenv("APP_LOG_LEVEL", default_level)) <21> <22> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <23> app.logger.info("ALLOWED_ORIGIN is set, enabling CORS for %s", allowed_origin) <24> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"]) <25> return app <26>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: logging WARNING = 30 basicConfig(*, filename: Optional[StrPath]=..., filemode: str=..., format: str=..., datefmt: Optional[str]=..., style: str=..., level: Optional[_Level]=..., stream: Optional[IO[str]]=..., handlers: Optional[Iterable[Handler]]=...) -> None getLogger(name: Optional[str]=...) -> Logger at: logging.Logger setLevel(level: _Level) -> None at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== + # module: scripts.load_azd_env + + ===========changed ref 1=========== + # module: scripts.load_azd_env + logger = logging.getLogger("scripts") + ===========changed ref 2=========== # module: app.backend.prepdocslib.integratedvectorizerstrategy + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 3=========== # module: app.backend.prepdocslib.filestrategy + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 4=========== # module: app.backend.prepdocslib.embeddings + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 5=========== # module: app.backend.prepdocslib.listfilestrategy + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 6=========== # module: app.backend.prepdocslib.textsplitter + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 7=========== # module: app.backend.prepdocslib.searchmanager + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 8=========== # module: app.backend.prepdocs + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 9=========== # module: app.backend.prepdocslib.blobmanager + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 10=========== # module: app.backend.prepdocslib.pdfparser + logger = logging.getLogger("scripts") - logger = logging.getLogger("ingester") ===========changed ref 11=========== # module: scripts.manageacl + logger = logging.getLogger("scripts") - logger = logging.getLogger("manageacl") ===========changed ref 12=========== # module: app.backend.main + # WEBSITE_HOSTNAME is always set by App Service, RUNNING_IN_PRODUCTION is set in main.bicep + RUNNING_ON_AZURE = os.getenv("WEBSITE_HOSTNAME") is not None or os.getenv("RUNNING_IN_PRODUCTION") is not None + + if not RUNNING_ON_AZURE: + load_azd_env() + app = create_app() ===========changed ref 13=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 5 + ===========changed ref 14=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_disabled(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "disabled") quart_app = app.create_app() async 
with quart_app.test_app() as test_app: client = test_app.test_client() response = await client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is False assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 15=========== # module: tests.test_app_config @pytest.mark.asyncio async def test_app_config_semanticranker_free(monkeypatch, minimal_env): monkeypatch.setenv("AZURE_SEARCH_SEMANTIC_RANKER", "free") quart_app = app.create_app() async with quart_app.test_app() as test_app: client = test_app.test_client() response = await client.get("/config") assert response.status_code == 200 result = await response.get_json() assert result["showGPT4VOptions"] is False assert result["showSemanticRankerOption"] is True assert result["showVectorOption"] is True + assert result["showUserUpload"] is False ===========changed ref 16=========== # module: tests.test_app_config + @pytest.mark.asyncio + async def test_app_user_upload_processors_docint(monkeypatch, minimal_env): + monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") + monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") + monkeypatch.setenv("USE_USER_UPLOAD", "true") + monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service") + + quart_app = app.create_app() + async with quart_app.test_app(): + ingester = quart_app.config[app.CONFIG_INGESTER] + assert ingester is not None + assert len(ingester.file_processors.keys()) == 14 + ===========changed ref 17=========== + # module: scripts.load_azd_env + def load_azd_env(): + """Get path to current azd env file and load file using python-dotenv""" + result = subprocess.run("azd env list -o json", shell=True, capture_output=True, text=True) + if result.returncode != 0: + raise Exception("Error loading azd env") + env_json = json.loads(result.stdout) + env_file_path = None + for entry in env_json: + if entry["IsDefault"]: + env_file_path = entry["DotEnvPath"] + if not env_file_path: + raise Exception("No default azd env file found") + logger.info(f"Loading azd env from {env_file_path}") + load_dotenv(env_file_path, override=True) +
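The logger rename shown in the changed refs (every "ingester"/"manageacl" logger becomes logging.getLogger("scripts")) is what makes the new create_app lines work: logging.getLogger returns the same object for the same name, so a single setLevel call now governs all of the script modules at once. A quick demonstration:

import logging

# Two modules that each do: logger = logging.getLogger("scripts")
ingester_logger = logging.getLogger("scripts")
pdfparser_logger = logging.getLogger("scripts")

logging.basicConfig(level=logging.WARNING)      # quiet root, as in create_app
logging.getLogger("scripts").setLevel(logging.INFO)

assert ingester_logger is pdfparser_logger       # same shared logger object
ingester_logger.info("visible at INFO")          # emitted
logging.getLogger("azure").debug("sdk chatter")  # suppressed by WARNING root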
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
<15>:<add> self.gpt4v_token_limit = get_token_limit(gpt4v_model, self.ALLOW_NON_GPT_MODELS) <del> self.gpt4v_token_limit = get_token_limit(gpt4v_model)
<s> Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): <0> self.search_client = search_client <1> self.blob_container_client = blob_container_client <2> self.openai_client = openai_client <3> self.auth_helper = auth_helper <4> self.embedding_model = embedding_model <5> self.embedding_deployment = embedding_deployment <6> self.embedding_dimensions = embedding_dimensions <7> self.sourcepage_field = sourcepage_field <8> self.content_field = content_field <9> self.gpt4v_deployment = gpt4v_deployment <10> self.gpt4v_model = gpt4v_model <11> self.query_language = query_language <12> self.query_speller = query_speller <13> self.vision_endpoint = vision_endpoint <14> self.vision_token_provider = vision_token_provider <15> self.gpt4v_token_limit = get_token_limit(gpt4v_model) <16>
===========unchanged ref 0=========== at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False, enable_global_documents: bool=False, enable_unauthenticated_access: bool=False) at: typing Awaitable = _alias(collections.abc.Awaitable, 1) Callable = _CallableType(collections.abc.Callable, 2)
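The new second argument, self.ALLOW_NON_GPT_MODELS, is a flag telling the token-limit helper to fall back to a conservative default rather than raise when the model name is not a recognized GPT model — the point of the "easier working with local models" commit. A hedged sketch of that behavior; the limits table and floor value below are illustrative, not the helper's actual data:

MODEL_TOKEN_LIMITS = {  # illustrative subset
    "gpt-35-turbo": 4000,
    "gpt-4": 8100,
}
MINIMUM_TOKEN_LIMIT = 4000  # assumed conservative floor


def get_token_limit(model: str, default_to_minimum: bool = False) -> int:
    if model in MODEL_TOKEN_LIMITS:
        return MODEL_TOKEN_LIMITS[model]
    if default_to_minimum:
        # Local/OSS model names (e.g. one served by Ollama) are not in the
        # table; returning a floor keeps prompt trimming working instead of
        # crashing at startup.
        return MINIMUM_TOKEN_LIMIT
    raise ValueError(f"Unknown model name: {model}")


assert get_token_limit("gpt-4") == 8100
assert get_token_limit("some-local-model", default_to_minimum=True) == 4000

Later records in this commit pass the same flag by keyword, default_to_minimum=self.ALLOW_NON_GPT_MODELS; the two spellings name the same parameter.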
app.backend.approaches.retrievethenreadvision/RetrieveThenReadVisionApproach.run
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
# module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> <4> overrides = context.get("overrides", {}) <5> seed = overrides.get("seed", None) <6> auth_claims = context.get("auth_claims", {}) <7> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <8> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <9> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <10> use_semantic_captions = True if overrides.get("semantic_captions") else False <11> top = overrides.get("top", 3) <12> minimum_search_score = overrides.get("minimum_search_score", 0.0) <13> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <14> filter = self.build_filter(overrides, auth_claims) <15> <16> vector_fields = overrides.get("vector_fields", ["embedding"]) <17> send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] <18> send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] <19> <20> # If retrieval mode includes vectors, compute an embedding for the query <21> vectors = [] <22> if use_vector_search: <23> for field in vector_fields: <24> vector = ( <25> await self.compute_text_embedding(q) <26> </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v), new_user_content=user_content, max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion =</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s> max_tokens=self.gpt4v_token_limit - response_token_limit, ) chat_completion = await self.openai_client.chat.completions.create( model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = { "text": sources_content, "images": [d["image_url"] for d in image_list], } extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "vector_fields": vector_fields, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", updated_messages, ( {"model": self.gpt4v_model, "deployment": self.g</s> ===========below chunk 2=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 3 <s>v_deployment} if self.gpt4v_deployment else {"model": self.gpt4v_model} ), ), ], } return { "message": { "content": chat_completion.choices[0].message.content, "role": chat_completion.choices[0].message.role, }, "context": extra_info, "session_state": session_state, } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach system_chat_template_gpt4v = ( "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. 
" + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " + "Answer the following question using only the data provided in the sources below. " + "The text and image source can be the same file name, don't use the image title when citing the image source, only use the file name as mentioned " + "If you cannot answer using the sources below, say you don't know. Return just the answer without any input texts " ) at: app.backend.approaches.retrievethenreadvision.RetrieveThenReadVisionApproach.__init__ self.blob_container_client = blob_container_client self.openai_client = openai_client self.gpt4v_model = gpt4v_model self.gpt4v_token_limit = get_token_limit(gpt4v_model, self.ALLOW_NON_GPT_MODELS) at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] ===========unchanged ref 1=========== search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_text_search: bool, use_vector_search: bool, use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document] get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str] compute_text_embedding(q: str) compute_image_embedding(q: str) run(self, messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any] at: core.imageshelper fetch_image(blob_container_client: ContainerClient, result: Document) -> Optional[ImageURL] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
<13>:<add> self.chatgpt_token_limit = get_token_limit(chatgpt_model, self.ALLOW_NON_GPT_MODELS) <del> self.chatgpt_token_limit = get_token_limit(chatgpt_model)
<s> openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): <0> self.search_client = search_client <1> self.chatgpt_deployment = chatgpt_deployment <2> self.openai_client = openai_client <3> self.auth_helper = auth_helper <4> self.chatgpt_model = chatgpt_model <5> self.embedding_model = embedding_model <6> self.embedding_dimensions = embedding_dimensions <7> self.chatgpt_deployment = chatgpt_deployment <8> self.embedding_deployment = embedding_deployment <9> self.sourcepage_field = sourcepage_field <10> self.content_field = content_field <11> self.query_language = query_language <12> self.query_speller = query_speller <13> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <14>
===========unchanged ref 0=========== at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False, enable_global_documents: bool=False, enable_unauthenticated_access: bool=False) ===========changed ref 0=========== <s> Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider + self.gpt4v_token_limit = get_token_limit(gpt4v_model, self.ALLOW_NON_GPT_MODELS) - self.gpt4v_token_limit = get_token_limit(gpt4v_model) ===========changed ref 1=========== # module: app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: q = messages[-1]["content"] if not isinstance(q, str): raise ValueError("The most recent message content must be a string.") overrides = context.get("overrides", {}) seed = overrides.get("seed", None) auth_claims = context.get("auth_claims", {}) use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = self.build_filter(overrides, auth_claims) vector_fields = overrides.get("vector_fields", ["embedding"]) send_text_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "texts", None] send_images_to_gptvision = overrides.get("gpt4v_input") in ["textAndImages", "images", None] # If retrieval mode includes vectors, compute an embedding for the query vectors = [] if use_vector_search: for field in vector_fields: vector = ( await self.compute_text_embedding(q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors</s> ===========changed ref 2=========== # module: 
app.backend.approaches.retrievethenreadvision class RetrieveThenReadVisionApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 <s>q) if field == "embedding" else await self.compute_image_embedding(q) ) vectors.append(vector) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) image_list: list[ChatCompletionContentPartImageParam] = [] user_content: list[ChatCompletionContentPartParam] = [{"text": q, "type": "text"}] # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True) if send_text_to_gptvision: content = "\n".join(sources_content) user_content.append({"text": content, "type": "text"}) if send_images_to_gptvision: for result in results: url = await fetch_image(self.blob_container_client, result) if url: image_list.append({"image_url": url, "type": "image_url"}) user_content.extend(image_list) response_token_limit = 1024 updated_messages = build_messages( model=self.gpt4v_model, system_prompt=overrides.get("prompt_template", self.system_chat_template_gpt4v),</s>
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: <0> q = messages[-1]["content"] <1> if not isinstance(q, str): <2> raise ValueError("The most recent message content must be a string.") <3> overrides = context.get("overrides", {}) <4> seed = overrides.get("seed", None) <5> auth_claims = context.get("auth_claims", {}) <6> use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] <7> use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <8> use_semantic_ranker = True if overrides.get("semantic_ranker") else False <9> use_semantic_captions = True if overrides.get("semantic_captions") else False <10> top = overrides.get("top", 3) <11> minimum_search_score = overrides.get("minimum_search_score", 0.0) <12> minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) <13> filter = self.build_filter(overrides, auth_claims) <14> <15> # If retrieval mode includes vectors, compute an embedding for the query <16> vectors: list[VectorQuery] = [] <17> if use_vector_search: <18> vectors.append(await self.compute_text_embedding(q)) <19> <20> results = await self.search( <21> top, <22> q, <23> filter, <24> vectors, <25> use_text_search, <26> use_vector_search, <27> use_semantic_ranker, <28> use_semantic_captions, <29> minimum_search_score, <30> minimum_reranker_score, <31> ) <32> <33> # Process</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False) # Append user message content = "\n".join(sources_content) user_content = q + "\n" + f"Sources:\n {content}" response_token_limit = 1024 updated_messages = build_messages( model=self.chatgpt_model, system_prompt=overrides.get("prompt_template", self.system_chat_template), few_shots=[{"role": "user", "content": self.question}, {"role": "assistant", "content": self.answer}], new_user_content=user_content, max_tokens=self.chatgpt_token_limit - response_token_limit, ) chat_completion = await self.openai_client.chat.completions.create( # Azure OpenAI takes the deployment name as the model name model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = {"text": sources_content} extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { "use_semantic_captions": use_semantic_captions, "use_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 2 <s>_semantic_ranker": use_semantic_ranker, "top": top, "filter": filter, "use_vector_search": use_vector_search, "use_text_search": use_text_search, }, ), ThoughtStep( "Search results", [result.serialize_for_results() for result in results], ), ThoughtStep( "Prompt to generate answer", updated_messages, ( {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment} if self.chatgpt_deployment else {"model": self.chatgpt_model} ), ), ], } return { "message": { "content": chat_completion.choices[0].message.content, "role": chat_completion.choices[0].message.role, }, "context": extra_info, "session_state": session_state, } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.openai_client = openai_client self.chatgpt_model = chatgpt_model self.chatgpt_token_limit = get_token_limit(chatgpt_model, self.ALLOW_NON_GPT_MODELS) ===========unchanged ref 1=========== at: approaches.approach ThoughtStep(title: str, description: Optional[Any], props: Optional[dict[str, Any]]=None) at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str] search(top: int, query_text: Optional[str], filter: Optional[str], vectors: List[VectorQuery], use_text_search: bool, use_vector_search: bool, use_semantic_ranker: bool, use_semantic_captions: bool, minimum_search_score: Optional[float], minimum_reranker_score: Optional[float]) -> List[Document] get_sources_content(results: List[Document], use_semantic_captions: bool, use_image_citation: bool) -> list[str] compute_text_embedding(q: str) run(self, messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
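The recurring arithmetic in these run methods is the prompt budget: the model's context window (self.chatgpt_token_limit) minus a reserved response_token_limit is the maximum build_messages may spend on the system prompt, few-shots, and retrieved sources; the new fallback_to_default flag lets that trimming work even for models the token counter does not recognize. A toy version of the budgeting, with a crude counter standing in for a real tokenizer:

def count_tokens(text: str) -> int:
    return max(1, len(text) // 4)  # rough stand-in for tiktoken


def build_messages_toy(system_prompt: str, history: list[str], new_user_content: str, max_tokens: int) -> list[dict]:
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": new_user_content},
    ]
    budget = max_tokens - sum(count_tokens(m["content"]) for m in messages)
    # Walk history newest-first, dropping older turns once the budget is
    # spent, so the current question is never the part that gets truncated.
    for turn in reversed(history):
        cost = count_tokens(turn)
        if cost > budget:
            break
        messages.insert(1, {"role": "user", "content": turn})
        budget -= cost
    return messages


token_limit = 4000           # e.g. from get_token_limit(...)
response_token_limit = 1024  # reserved for the completion
msgs = build_messages_toy(
    "You are a helpful assistant.",
    ["an older turn"] * 100,
    "What is the deductible for the employee plan?",
    token_limit - response_token_limit,
)
print(len(msgs))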
app.backend.approaches.chatreadretrievereadvision/ChatReadRetrieveReadVisionApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
<17>:<add> self.chatgpt_token_limit = get_token_limit(gpt4v_model, default_to_minimum=self.ALLOW_NON_GPT_MODELS) <del> self.chatgpt_token_limit = get_token_limit(gpt4v_model)
<s>Azure OpenAI gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): <0> self.search_client = search_client <1> self.blob_container_client = blob_container_client <2> self.openai_client = openai_client <3> self.auth_helper = auth_helper <4> self.chatgpt_model = chatgpt_model <5> self.chatgpt_deployment = chatgpt_deployment <6> self.gpt4v_deployment = gpt4v_deployment <7> self.gpt4v_model = gpt4v_model <8> self.embedding_deployment = embedding_deployment <9> self.embedding_model = embedding_model <10> self.embedding_dimensions = embedding_dimensions <11> self.sourcepage_field = sourcepage_field <12> self.content_field = content_field <13> self.query_language = query_language <14> self.query_speller = query_speller <15> self.vision_endpoint = vision_endpoint <16> self.vision_token_provider = vision_token_provider <17> self.chatgpt_token_limit = get_token_limit(gpt4v_model) <18>
===========unchanged ref 0=========== at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False, enable_global_documents: bool=False, enable_unauthenticated_access: bool=False) at: typing Awaitable = _alias(collections.abc.Awaitable, 1) Callable = _CallableType(collections.abc.Callable, 2) ===========changed ref 0=========== <s> openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.embedding_model = embedding_model self.embedding_dimensions = embedding_dimensions self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller + self.chatgpt_token_limit = get_token_limit(chatgpt_model, self.ALLOW_NON_GPT_MODELS) - self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 1=========== <s> Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider + self.gpt4v_token_limit = get_token_limit(gpt4v_model, self.ALLOW_NON_GPT_MODELS) - self.gpt4v_token_limit = get_token_limit(gpt4v_model) ===========changed ref 2=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: q = messages[-1]["content"] if not isinstance(q, str): raise ValueError("The most recent message content must be a string.") overrides = context.get("overrides", {}) seed = overrides.get("seed", None) auth_claims 
= context.get("auth_claims", {}) use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = self.build_filter(overrides, auth_claims) # If retrieval mode includes vectors, compute an embedding for the query vectors: list[VectorQuery] = [] if use_vector_search: vectors.append(await self.compute_text_embedding(q)) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False)</s> ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: # offset: 1 <s>_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False) # Append user message content = "\n".join(sources_content) user_content = q + "\n" + f"Sources:\n {content}" response_token_limit = 1024 updated_messages = build_messages( model=self.chatgpt_model, system_prompt=overrides.get("prompt_template", self.system_chat_template), few_shots=[{"role": "user", "content": self.question}, {"role": "assistant", "content": self.answer}], new_user_content=user_content, max_tokens=self.chatgpt_token_limit - response_token_limit, + fallback_to_default=self.ALLOW_NON_GPT_MODELS, ) chat_completion = await self.openai_client.chat.completions.create( # Azure OpenAI takes the deployment name as the model name model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model, messages=updated_messages, temperature=overrides.get("temperature", 0.3), max_tokens=response_token_limit, n=1, seed=seed, ) data_points = {"text": sources_content} extra_info = { "data_points": data_points, "thoughts": [ ThoughtStep( "Search using user query", q, { </s>
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
9722c788226009ab213452b8f502bed85a0a1afc
Changes for easier working with local models (#1992)
<12>:<add> self.chatgpt_token_limit = get_token_limit(chatgpt_model, default_to_minimum=self.ALLOW_NON_GPT_MODELS) <del> self.chatgpt_token_limit = get_token_limit(chatgpt_model)
<s> openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): <0> self.search_client = search_client <1> self.openai_client = openai_client <2> self.auth_helper = auth_helper <3> self.chatgpt_model = chatgpt_model <4> self.chatgpt_deployment = chatgpt_deployment <5> self.embedding_deployment = embedding_deployment <6> self.embedding_model = embedding_model <7> self.embedding_dimensions = embedding_dimensions <8> self.sourcepage_field = sourcepage_field <9> self.content_field = content_field <10> self.query_language = query_language <11> self.query_speller = query_speller <12> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <13>
===========unchanged ref 0=========== at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True __init__(self, search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: core.authentication AuthenticationHelper(search_index: Optional[SearchIndex], use_authentication: bool, server_app_id: Optional[str], server_app_secret: Optional[str], client_app_id: Optional[str], tenant_id: Optional[str], require_access_control: bool=False, enable_global_documents: bool=False, enable_unauthenticated_access: bool=False) ===========changed ref 0=========== <s> openai_client: AsyncOpenAI, chatgpt_model: str, chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI embedding_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, ): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.openai_client = openai_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.embedding_model = embedding_model self.embedding_dimensions = embedding_dimensions self.chatgpt_deployment = chatgpt_deployment self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller + self.chatgpt_token_limit = get_token_limit(chatgpt_model, self.ALLOW_NON_GPT_MODELS) - self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 1=========== <s> Optional[str], gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider + self.gpt4v_token_limit = get_token_limit(gpt4v_model, self.ALLOW_NON_GPT_MODELS) - self.gpt4v_token_limit = get_token_limit(gpt4v_model) ===========changed ref 2=========== <s>Azure OpenAI gpt4v_model: str, embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text" embedding_model: str, embedding_dimensions: int, sourcepage_field: str, content_field: str, query_language: str, query_speller: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]] ): self.search_client = search_client self.blob_container_client = blob_container_client self.openai_client = openai_client self.auth_helper = auth_helper self.chatgpt_model = chatgpt_model self.chatgpt_deployment = 
chatgpt_deployment self.gpt4v_deployment = gpt4v_deployment self.gpt4v_model = gpt4v_model self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.embedding_dimensions = embedding_dimensions self.sourcepage_field = sourcepage_field self.content_field = content_field self.query_language = query_language self.query_speller = query_speller self.vision_endpoint = vision_endpoint self.vision_token_provider = vision_token_provider + self.chatgpt_token_limit = get_token_limit(gpt4v_model, default_to_minimum=self.ALLOW_NON_GPT_MODELS) - self.chatgpt_token_limit = get_token_limit(gpt4v_model) ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run( self, messages: list[ChatCompletionMessageParam], session_state: Any = None, context: dict[str, Any] = {}, ) -> dict[str, Any]: q = messages[-1]["content"] if not isinstance(q, str): raise ValueError("The most recent message content must be a string.") overrides = context.get("overrides", {}) seed = overrides.get("seed", None) auth_claims = context.get("auth_claims", {}) use_text_search = overrides.get("retrieval_mode") in ["text", "hybrid", None] use_vector_search = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_ranker = True if overrides.get("semantic_ranker") else False use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top", 3) minimum_search_score = overrides.get("minimum_search_score", 0.0) minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0) filter = self.build_filter(overrides, auth_claims) # If retrieval mode includes vectors, compute an embedding for the query vectors: list[VectorQuery] = [] if use_vector_search: vectors.append(await self.compute_text_embedding(q)) results = await self.search( top, q, filter, vectors, use_text_search, use_vector_search, use_semantic_ranker, use_semantic_captions, minimum_search_score, minimum_reranker_score, ) # Process results sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False)</s>
tests.test_app_config/test_app_user_upload_processors
Modified
Azure-Samples~azure-search-openai-demo
2dd7ba9f062ddd55f3fd45e2da748834c5d1c94e
Adding CSV Parser (#1996)
<8>:<add> assert len(ingester.file_processors.keys()) == 6 <del> assert len(ingester.file_processors.keys()) == 5
# module: tests.test_app_config @pytest.mark.asyncio async def test_app_user_upload_processors(monkeypatch, minimal_env): <0> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") <1> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") <2> monkeypatch.setenv("USE_USER_UPLOAD", "true") <3> <4> quart_app = app.create_app() <5> async with quart_app.test_app(): <6> ingester = quart_app.config[app.CONFIG_INGESTER] <7> assert ingester is not None <8> assert len(ingester.file_processors.keys()) == 5 <9>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None]
tests.test_app_config/test_app_user_upload_processors_docint
Modified
Azure-Samples~azure-search-openai-demo
2dd7ba9f062ddd55f3fd45e2da748834c5d1c94e
Adding CSV Parser (#1996)
<9>:<add> assert len(ingester.file_processors.keys()) == 15 <del> assert len(ingester.file_processors.keys()) == 14
# module: tests.test_app_config @pytest.mark.asyncio async def test_app_user_upload_processors_docint(monkeypatch, minimal_env): <0> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account") <1> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container") <2> monkeypatch.setenv("USE_USER_UPLOAD", "true") <3> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service") <4> <5> quart_app = app.create_app() <6> async with quart_app.test_app(): <7> ingester = quart_app.config[app.CONFIG_INGESTER] <8> assert ingester is not None <9> assert len(ingester.file_processors.keys()) == 14 <10>
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)

===========changed ref 0===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 6
-       assert len(ingester.file_processors.keys()) == 5
tests.test_app_config/test_app_user_upload_processors_docint_localpdf
Modified
Azure-Samples~azure-search-openai-demo
2dd7ba9f062ddd55f3fd45e2da748834c5d1c94e
Adding CSV Parser (#1996)
<10>:<add> assert len(ingester.file_processors.keys()) == 15
<del> assert len(ingester.file_processors.keys()) == 14
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint_localpdf(monkeypatch, minimal_env):
<0> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
<1> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
<2> monkeypatch.setenv("USE_USER_UPLOAD", "true")
<3> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
<4> monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
<5>
<6> quart_app = app.create_app()
<7> async with quart_app.test_app():
<8>     ingester = quart_app.config[app.CONFIG_INGESTER]
<9>     assert ingester is not None
<10>     assert len(ingester.file_processors.keys()) == 14
<11>     assert ingester.file_processors[".pdf"] is not ingester.file_processors[".pptx"]
<12>
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)

===========changed ref 0===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 6
-       assert len(ingester.file_processors.keys()) == 5

===========changed ref 1===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")
    monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 15
-       assert len(ingester.file_processors.keys()) == 14
tests.test_app_config/test_app_user_upload_processors_docint_localhtml
Modified
Azure-Samples~azure-search-openai-demo
2dd7ba9f062ddd55f3fd45e2da748834c5d1c94e
Adding CSV Parser (#1996)
<10>:<add> assert len(ingester.file_processors.keys()) == 15
<del> assert len(ingester.file_processors.keys()) == 14
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint_localhtml(monkeypatch, minimal_env):
<0> monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
<1> monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
<2> monkeypatch.setenv("USE_USER_UPLOAD", "true")
<3> monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
<4> monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
<5>
<6> quart_app = app.create_app()
<7> async with quart_app.test_app():
<8>     ingester = quart_app.config[app.CONFIG_INGESTER]
<9>     assert ingester is not None
<10>     assert len(ingester.file_processors.keys()) == 14
<11>     assert ingester.file_processors[".html"] is not ingester.file_processors[".pptx"]
<12>
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)

===========changed ref 0===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 6
-       assert len(ingester.file_processors.keys()) == 5

===========changed ref 1===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")
    monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 15
-       assert len(ingester.file_processors.keys()) == 14

===========changed ref 2===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint_localpdf(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")
    monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")
    monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 15
-       assert len(ingester.file_processors.keys()) == 14
        assert ingester.file_processors[".pdf"] is not ingester.file_processors[".pptx"]
app.backend.prepdocs/setup_file_processors
Modified
Azure-Samples~azure-search-openai-demo
2dd7ba9f062ddd55f3fd45e2da748834c5d1c94e
Adding CSV Parser (#1996)
# module: app.backend.prepdocs
def setup_file_processors(
    azure_credential: AsyncTokenCredential,
    document_intelligence_service: Union[str, None],
    document_intelligence_key: Union[str, None] = None,
    local_pdf_parser: bool = False,
    local_html_parser: bool = False,
    search_images: bool = False,
):
<0> sentence_text_splitter = SentenceTextSplitter(has_image_embeddings=search_images)
<1>
<2> doc_int_parser: Optional[DocumentAnalysisParser] = None
<3> # check if Azure Document Intelligence credentials are provided
<4> if document_intelligence_service is not None:
<5>     documentintelligence_creds: Union[AsyncTokenCredential, AzureKeyCredential] = (
<6>         azure_credential if document_intelligence_key is None else AzureKeyCredential(document_intelligence_key)
<7>     )
<8>     doc_int_parser = DocumentAnalysisParser(
<9>         endpoint=f"https://{document_intelligence_service}.cognitiveservices.azure.com/",
<10>         credential=documentintelligence_creds,
<11>     )
<12>
<13> pdf_parser: Optional[Parser] = None
<14> if local_pdf_parser or document_intelligence_service is None:
<15>     pdf_parser = LocalPdfParser()
<16> elif document_intelligence_service is not None:
<17>     pdf_parser = doc_int_parser
<18> else:
<19>     logger.warning("No PDF parser available")
<20>
<21> html_parser: Optional[Parser] = None
<22> if local_html_parser or document_intelligence_service is None:
<23>     html_parser = LocalHTMLParser()
<24> elif document_intelligence_service is not None:
<25>     html_parser = doc_int_parser
<26> else:
<27>     logger.warning("No HTML parser available")
<28>
<29> # These file formats can always be parsed:
<30> file_processors = {
<31>     ".json": FileProcessor(JsonParser(),</s>
===========below chunk 0===========
# module: app.backend.prepdocs
def setup_file_processors(
    azure_credential: AsyncTokenCredential,
    document_intelligence_service: Union[str, None],
    document_intelligence_key: Union[str, None] = None,
    local_pdf_parser: bool = False,
    local_html_parser: bool = False,
    search_images: bool = False,
):
# offset: 1
        ".md": FileProcessor(TextParser(), sentence_text_splitter),
        ".txt": FileProcessor(TextParser(), sentence_text_splitter),
    }
    # These require either a Python package or Document Intelligence
    if pdf_parser is not None:
        file_processors.update({".pdf": FileProcessor(pdf_parser, sentence_text_splitter)})
    if html_parser is not None:
        file_processors.update({".html": FileProcessor(html_parser, sentence_text_splitter)})
    # These file formats require Document Intelligence
    if doc_int_parser is not None:
        file_processors.update(
            {
                ".docx": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".pptx": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".xlsx": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".png": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".jpg": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".jpeg": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".tiff": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".bmp": FileProcessor(doc_int_parser, sentence_text_splitter),
                ".heic": FileProcessor(doc_int_parser, sentence_text_splitter),
            }
        )
    return file_processors

===========unchanged ref 0===========
at: app.backend.prepdocs
logger = logging.getLogger("scripts")

at: logging.Logger
warning(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None

at: prepdocslib.csvparser
CsvParser()

at: prepdocslib.fileprocessor
FileProcessor(parser: Parser, splitter: TextSplitter)

at: prepdocslib.htmlparser
LocalHTMLParser()

at: prepdocslib.jsonparser
JsonParser()

at: prepdocslib.parser
Parser()

at: prepdocslib.pdfparser
LocalPdfParser()
DocumentAnalysisParser(endpoint: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], model_id="prebuilt-layout")

at: prepdocslib.textparser
TextParser()

at: prepdocslib.textsplitter
SentenceTextSplitter(has_image_embeddings: bool, max_tokens_per_section: int=500)
SimpleTextSplitter(max_object_length: int=1000)

===========changed ref 0===========
+ # module: tests.test_csvparser
+

===========changed ref 1===========
+ # module: app.backend.prepdocslib.csvparser
+

===========changed ref 2===========
+ # module: app.backend.prepdocslib.csvparser
+ class CsvParser(Parser):
+     """
+     Concrete parser that can parse CSV into Page objects. Each row becomes a Page object.
+     """
+

===========changed ref 3===========
+ # module: tests.test_csvparser
+ @pytest.mark.asyncio
+ async def test_csvparser_empty_file():
+     # Mock empty CSV content in binary format
+     file = io.BytesIO(b"")
+     file.name = "test.csv"
+     csvparser = CsvParser()
+
+     # Parse the file
+     pages = [page async for page in csvparser.parse(file)]
+
+     # Assertions
+     assert len(pages) == 0  # No rows should be parsed from an empty file
+

===========changed ref 4===========
+ # module: tests.test_csvparser
+ @pytest.mark.asyncio
+ async def test_csvparser_single_row():
+     # Mock CSV content with a single row in binary format
+     file = io.BytesIO(b"col1,col2,col3\nvalue1,value2,value3")
+     file.name = "test.csv"
+     csvparser = CsvParser()
+
+     # Parse the file
+     pages = [page async for page in csvparser.parse(file)]
+
+     # Assertions
+     assert len(pages) == 1
+     assert pages[0].page_num == 0
+     assert pages[0].offset == 0
+     assert pages[0].text == "value1,value2,value3"
+

===========changed ref 5===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 6
-       assert len(ingester.file_processors.keys()) == 5

===========changed ref 6===========
+ # module: app.backend.prepdocslib.csvparser
+ class CsvParser(Parser):
+     def parse(self, content: IO) -> AsyncGenerator[Page, None]:
+         # Check if content is in bytes (binary file) and decode to string
+         content_str: str
+         if isinstance(content, (bytes, bytearray)):
+             content_str = content.decode("utf-8")
+         elif hasattr(content, "read"):  # Handle BufferedReader
+             content_str = content.read().decode("utf-8")
+
+         # Create a CSV reader from the text content
+         reader = csv.reader(content_str.splitlines())
+         offset = 0
+
+         # Skip the header row
+         next(reader, None)
+
+         for i, row in enumerate(reader):
+             page_text = ",".join(row)
+             yield Page(i, offset, page_text)
+             offset += len(page_text) + 1  # Account for newline character
+

===========changed ref 7===========
# module: tests.test_app_config
@pytest.mark.asyncio
async def test_app_user_upload_processors_docint(monkeypatch, minimal_env):
    monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-user-storage-account")
    monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-user-storage-container")
    monkeypatch.setenv("USE_USER_UPLOAD", "true")
    monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-docint-service")

    quart_app = app.create_app()
    async with quart_app.test_app():
        ingester = quart_app.config[app.CONFIG_INGESTER]
        assert ingester is not None
+       assert len(ingester.file_processors.keys()) == 15
-       assert len(ingester.file_processors.keys()) == 14
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
be26d31e38b6058bb7cfe3d8190b4ea8861c8b63
Add chat history feature (#1988)
<13>:<add> # If session state is provided, persists the session state,
<add> # else creates a new session_id depending on the chat history options enabled.
<add> session_state = request_json.get("session_state")
<add> if session_state is None:
<add>     session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
<16>:<add> session_state=session_state,
<del> session_state=request_json.get("session_state"),
# module: app.backend.app @bp.route("/chat", methods=["POST"]) @authenticated async def chat(auth_claims: Dict[str, Any]): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> context["auth_claims"] = auth_claims <5> try: <6> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <7> approach: Approach <8> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: <9> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) <10> else: <11> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) <12> <13> result = await approach.run( <14> request_json["messages"], <15> context=context, <16> session_state=request_json.get("session_state"), <17> ) <18> return jsonify(result) <19> except Exception as error: <20> return error_response(error, "/chat") <21>
===========unchanged ref 0===========
at: app.backend.app
bp = Blueprint("routes", __name__, static_folder="static")

at: approaches.approach
Approach(search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]])

at: approaches.approach.Approach
ALLOW_NON_GPT_MODELS = True
run(messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any]

at: config
CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach"
CONFIG_CHAT_APPROACH = "chat_approach"
CONFIG_CHAT_HISTORY_BROWSER_ENABLED = "chat_history_browser_enabled"

at: core.sessionhelper
create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]

at: decorators
authenticated(route_fn: Callable[[Dict[str, Any]], Any])

at: typing
cast(typ: Type[_T], val: Any) -> _T
cast(typ: str, val: Any) -> Any
cast(typ: object, val: Any) -> Any
Dict = _alias(dict, 2, inst=False, name='Dict')
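The new fallback in one standalone function; create_session_id is approximated with uuid4, which is an assumption about its implementation:

import uuid

def resolve_session_state(request_json: dict, chat_history_browser_enabled: bool):
    # Reuse the client's session_state; otherwise mint a new id only
    # when the browser chat history feature is enabled.
    session_state = request_json.get("session_state")
    if session_state is None and chat_history_browser_enabled:
        session_state = str(uuid.uuid4())  # assumed shape of create_session_id's output
    return session_state

assert resolve_session_state({"session_state": "abc123"}, True) == "abc123"  # client value wins
assert resolve_session_state({}, False) is None  # feature off, no id minted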
app.backend.app/chat_stream
Modified
Azure-Samples~azure-search-openai-demo
be26d31e38b6058bb7cfe3d8190b4ea8861c8b63
Add chat history feature (#1988)
<13>:<add> # If session state is provided, persists the session state,
<add> # else creates a new session_id depending on the chat history options enabled.
<add> session_state = request_json.get("session_state")
<add> if session_state is None:
<add>     session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
<16>:<add> session_state=session_state,
<del> session_state=request_json.get("session_state"),
# module: app.backend.app @bp.route("/chat/stream", methods=["POST"]) @authenticated async def chat_stream(auth_claims: Dict[str, Any]): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> context["auth_claims"] = auth_claims <5> try: <6> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <7> approach: Approach <8> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: <9> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) <10> else: <11> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) <12> <13> result = await approach.run_stream( <14> request_json["messages"], <15> context=context, <16> session_state=request_json.get("session_state"), <17> ) <18> response = await make_response(format_as_ndjson(result)) <19> response.timeout = None # type: ignore <20> response.mimetype = "application/json-lines" <21> return response <22> except Exception as error: <23> return error_response(error, "/chat") <24>
===========unchanged ref 0===========
at: app.backend.app
bp = Blueprint("routes", __name__, static_folder="static")

at: app.backend.app.chat
session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
session_state = request_json.get("session_state")
result = await approach.run(
    request_json["messages"],
    context=context,
    session_state=session_state,
)

at: approaches.approach
Approach(search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]])

at: config
CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach"
CONFIG_CHAT_APPROACH = "chat_approach"

at: decorators
authenticated(route_fn: Callable[[Dict[str, Any]], Any])

at: error
error_response(error: Exception, route: str, status_code: int=500)

at: typing
cast(typ: Type[_T], val: Any) -> _T
cast(typ: str, val: Any) -> Any
cast(typ: object, val: Any) -> Any
Dict = _alias(dict, 2, inst=False, name='Dict')

===========changed ref 0===========
# module: app.backend.app
@bp.route("/chat", methods=["POST"])
@authenticated
async def chat(auth_claims: Dict[str, Any]):
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    request_json = await request.get_json()
    context = request_json.get("context", {})
    context["auth_claims"] = auth_claims
    try:
        use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
        approach: Approach
        if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH])
        else:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH])
+       # If session state is provided, persists the session state,
+       # else creates a new session_id depending on the chat history options enabled.
+       session_state = request_json.get("session_state")
+       if session_state is None:
+           session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
        result = await approach.run(
            request_json["messages"],
            context=context,
+           session_state=session_state,
-           session_state=request_json.get("session_state"),
        )
        return jsonify(result)
    except Exception as error:
        return error_response(error, "/chat")
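The streaming route above returns application/json-lines via format_as_ndjson; the real helper lives in the repo, so this body is an assumption about what such a helper plausibly does:

import json

async def format_as_ndjson(events):
    # One JSON document per line, emitted as results stream in.
    async for event in events:
        yield json.dumps(event, ensure_ascii=False) + "\n"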
app.backend.app/config
Modified
Azure-Samples~azure-search-openai-demo
be26d31e38b6058bb7cfe3d8190b4ea8861c8b63
Add chat history feature (#1988)
<10>:<add> "showChatHistoryBrowser": current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED],
# module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): <0> return jsonify( <1> { <2> "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], <3> "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], <4> "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], <5> "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], <6> "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED], <7> "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], <8> "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], <9> "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], <10> } <11> ) <12>
===========unchanged ref 0===========
at: app.backend.app
bp = Blueprint("routes", __name__, static_folder="static")

at: app.backend.app.chat_stream
response = await make_response(format_as_ndjson(result))

at: config
CONFIG_AUTH_CLIENT = "auth_client"

at: error
error_response(error: Exception, route: str, status_code: int=500)

===========changed ref 0===========
# module: app.backend.app
@bp.route("/chat", methods=["POST"])
@authenticated
async def chat(auth_claims: Dict[str, Any]):
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    request_json = await request.get_json()
    context = request_json.get("context", {})
    context["auth_claims"] = auth_claims
    try:
        use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
        approach: Approach
        if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH])
        else:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH])
+       # If session state is provided, persists the session state,
+       # else creates a new session_id depending on the chat history options enabled.
+       session_state = request_json.get("session_state")
+       if session_state is None:
+           session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
        result = await approach.run(
            request_json["messages"],
            context=context,
+           session_state=session_state,
-           session_state=request_json.get("session_state"),
        )
        return jsonify(result)
    except Exception as error:
        return error_response(error, "/chat")

===========changed ref 1===========
# module: app.backend.app
@bp.route("/chat/stream", methods=["POST"])
@authenticated
async def chat_stream(auth_claims: Dict[str, Any]):
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    request_json = await request.get_json()
    context = request_json.get("context", {})
    context["auth_claims"] = auth_claims
    try:
        use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
        approach: Approach
        if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH])
        else:
            approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH])
+       # If session state is provided, persists the session state,
+       # else creates a new session_id depending on the chat history options enabled.
+       session_state = request_json.get("session_state")
+       if session_state is None:
+           session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED])
        result = await approach.run_stream(
            request_json["messages"],
            context=context,
+           session_state=session_state,
-           session_state=request_json.get("session_state"),
        )
        response = await make_response(format_as_ndjson(result))
        response.timeout = None  # type: ignore
        response.mimetype = "application/json-lines"
        return response
    except Exception as error:
        return error_response(error, "/chat")
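A hypothetical client-side probe of the new flag, matching the JSON shape returned by the /config route above (the frontend does the equivalent with fetch()):

import json
import urllib.request

def chat_history_enabled(base_url: str) -> bool:
    # Hypothetical helper, not part of the repo.
    with urllib.request.urlopen(f"{base_url}/config") as resp:
        return bool(json.load(resp).get("showChatHistoryBrowser", False))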
app.backend.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.__init__
Modified
Azure-Samples~azure-search-openai-demo
31f501a16244fdf65eafe117931cb3696b9d5255
Updates to integrated vectorization (#2045)
<0>:<del> if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):
<1>:<del> raise Exception("Expecting AzureOpenAI embedding service")
<2>:<del>
<s>Strategy,
        blob_manager: BlobManager,
        search_info: SearchInfo,
+       embeddings: AzureOpenAIEmbeddingService,
-       embeddings: Optional[AzureOpenAIEmbeddingService],
        subscription_id: str,
        search_service_user_assigned_id: str,
        document_action: DocumentAction = DocumentAction.Add,
        search_analyzer_name: Optional[str] = None,
        use_acls: bool = False,
        category: Optional[str] = None,
    ):
<0> if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):
<1>     raise Exception("Expecting AzureOpenAI embedding service")
<2>
<3> self.list_file_strategy = list_file_strategy
<4> self.blob_manager = blob_manager
<5> self.document_action = document_action
<6> self.embeddings = embeddings
<7> self.subscription_id = subscription_id
<8> self.search_user_assigned_identity = search_service_user_assigned_id
<9> self.search_analyzer_name = search_analyzer_name
<10> self.use_acls = use_acls
<11> self.category = category
<12> self.search_info = search_info
<13>
===========unchanged ref 0===========
at: app.backend.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy
create_embedding_skill(self, index_name: str)

at: app.backend.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.create_embedding_skill
skillset_name = f"{index_name}-skillset"

===========changed ref 0===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
+       logger.info("Checking whether search index %s exists...", self.search_info.index_name)
-       logger.info("Ensuring search index %s exists", self.search_info.index_name)
        async with self.search_info.create_search_index_client() as search_index_client:
+
+           if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:
+               logger.info("Creating new search index %s", self.search_info.index_name)
+               fields = [
-           fields = [
+                   (
-               (
+                       SimpleField(name="id", type="Edm.String", key=True)
-                   SimpleField(name="id", type="Edm.String", key=True)
+                       if not self.use_int_vectorization
-                   if not self.use_int_vectorization
+                       else SearchField(
-                   else SearchField(
+                           name="id",
-                       name="id",
+                           type="Edm.String",
+                           key=True,
+                           sortable=True,
+                           filterable=True,
+                           facetable=True,
+                           analyzer_name="keyword",
+                       )
+                   ),
+                   SearchableField(
+                       name="content",
                        type="Edm.String",
-                       key=True,
-                       sortable=True,
-                       filterable=True,
-                       facetable=True,
+                       analyzer_name=self.search_analyzer_name,
-                       analyzer_name="keyword",
+                   ),
-                   )
-               ),
-               SearchableField(
-                   name="content",
-                   type="Edm.String",
-                   analyzer_name=self.search_analyzer_name,
-               ),
-               SearchField(
-                   name="embedding",
-                   type=SearchFieldDataType.Collection</s>

===========changed ref 1===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
# offset: 1
<s>name,
-               ),
-               SearchField(
-                   name="embedding",
-                   type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
-                   hidden=False,
-                   searchable=True,
-                   filterable=False,
-                   sortable=False,
-                   facetable=False,
-                   vector_search_dimensions=self.embedding_dimensions,
-                   vector_search_profile_name="embedding_config",
-               ),
-               SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
-               SimpleField(
-                   name="sourcepage",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=True,
-               ),
-               SimpleField(
-                   name="sourcefile",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=True,
-               ),
-               SimpleField(
-                   name="storageUrl",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=False,
-               ),
-           ]
-           if self.use_acls:
-               fields.append(
-                   SimpleField(
-                       name="oids",
-                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
-                       filterable=True,
-                   )
-               )
-               fields.append(
-                   SimpleField(
-                       name="groups",
-                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
-                       filterable=True,
-                   )
-               )
-           if self.use_int_vectorization:
-               fields.append(SearchableField(name="parent_id", type="Edm.String", filterable</s>

===========changed ref 2===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
# offset: 2
<s>))
-           if self.search_images:
-               fields.append(
                SearchField(
+                   name="embedding",
-                   name="imageEmbedding",
                    type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
                    hidden=False,
                    searchable=True,
                    filterable=False,
                    sortable=False,
                    facetable=False,
+                   vector_search_dimensions=self.embedding_dimensions,
-                   vector_search_dimensions=1024,
                    vector_search_profile_name="embedding_config",
+               ),
+               SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
+               SimpleField(
+                   name="sourcepage",
+                   type="Edm.String",
+                   filterable=True,
+                   facetable=True,
+               ),
+               SimpleField(
+                   name="sourcefile",
+                   type="Edm.String",
+                   filterable=True,
+                   facetable=True,
+               ),
+               SimpleField(
+                   name="storageUrl",
+                   type="Edm.String",
+                   filterable=True,
+                   facetable=False,
+               ),
+           ]
+           if self.use_acls:
+               fields.append(
+                   SimpleField(
+                       name="oids",
+                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
+                       filterable=True,
+                   )
+               )
+               fields.append(
+                   SimpleField(
+                       name="groups",
+                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
+                       filterable=True,
+                   )
+               )
+           if self.use_int_vectorization:
+ </s>
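The signature change above trades a runtime guard for a static one; in miniature (stub class, illustrative only):

from typing import Optional

class AzureOpenAIEmbeddingService: ...  # stub standing in for the real service class

# Before: an Optional parameter forced a runtime isinstance/None check.
def init_before(embeddings: Optional[AzureOpenAIEmbeddingService]):
    if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):
        raise Exception("Expecting AzureOpenAI embedding service")

# After: the annotation requires a service, so the guard can be deleted
# and type checkers flag any caller that passes None.
def init_after(embeddings: AzureOpenAIEmbeddingService):
    ...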
app.backend.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.create_embedding_skill
Modified
Azure-Samples~azure-search-openai-demo
31f501a16244fdf65eafe117931cb3696b9d5255
Updates to integrated vectorization (#2045)
<3>:<add> name=f"{index_name}-split-skill", <14>:<del> if self.embeddings is None: <15>:<del> raise ValueError("Expecting Azure Open AI instance") <16>:<del> <18>:<add> name=f"{index_name}-embedding-skill", <20>:<add> resource_url=f"https://{self.embeddings.open_ai_service}.openai.azure.com", <del> resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com", <21>:<add> deployment_name=self.embeddings.open_ai_deployment, <del> deployment_id=self.embeddings.open_ai_deployment, <22>:<add> model_name=self.embeddings.open_ai_model_name, <add> dimensions=self.embeddings.open_ai_dimensions, <28>:<add> index_projection = SearchIndexerIndexProjection( <del> index_projections = SearchIndexerIndexProjections(
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def create_embedding_skill(self, index_name: str):
<0> skillset_name = f"{index_name}-skillset"
<1>
<2> split_skill = SplitSkill(
<3>     description="Split skill to chunk documents",
<4>     text_split_mode="pages",
<5>     context="/document",
<6>     maximum_page_length=2048,
<7>     page_overlap_length=20,
<8>     inputs=[
<9>         InputFieldMappingEntry(name="text", source="/document/content"),
<10>     ],
<11>     outputs=[OutputFieldMappingEntry(name="textItems", target_name="pages")],
<12> )
<13>
<14> if self.embeddings is None:
<15>     raise ValueError("Expecting Azure Open AI instance")
<16>
<17> embedding_skill = AzureOpenAIEmbeddingSkill(
<18>     description="Skill to generate embeddings via Azure OpenAI",
<19>     context="/document/pages/*",
<20>     resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com",
<21>     deployment_id=self.embeddings.open_ai_deployment,
<22>     inputs=[
<23>         InputFieldMappingEntry(name="text", source="/document/pages/*"),
<24>     ],
<25>     outputs=[OutputFieldMappingEntry(name="embedding", target_name="vector")],
<26> )
<27>
<28> index_projections = SearchIndexerIndexProjections(
<29>     selectors=[
<30>         SearchIndexerIndexProjectionSelector(
<31>             target_index_name=index_name,
<32>             parent_key_field_name="parent_id",
<33>             source_context="/document/pages/*",
<34>             mappings=[
<35>                 InputFieldMappingEntry(name="content", source="/document/pages/*"),
<36>                 InputFieldMappingEntry(name="embedding", source="/document/pages/*/vector"),
<37>                 InputFieldMappingEntry(name="sourcepage", source="/document/metadata_storage_name"),
<38>             ],
        )</s>
===========below chunk 0===========
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def create_embedding_skill(self, index_name: str):
# offset: 1
            ],
            parameters=SearchIndexerIndexProjectionsParameters(
                projection_mode=IndexProjectionMode.SKIP_INDEXING_PARENT_DOCUMENTS
            ),
        )

        skillset = SearchIndexerSkillset(
            name=skillset_name,
            description="Skillset to chunk documents and generate embeddings",
            skills=[split_skill, embedding_skill],
            index_projections=index_projections,
        )

        return skillset

===========unchanged ref 0===========
at: app.backend.prepdocslib.integratedvectorizerstrategy.IntegratedVectorizerStrategy.__init__
self.embeddings = embeddings

at: prepdocslib.embeddings.AzureOpenAIEmbeddingService.__init__
self.open_ai_service = open_ai_service
self.open_ai_deployment = open_ai_deployment

at: prepdocslib.embeddings.OpenAIEmbeddings.__init__
self.open_ai_model_name = open_ai_model_name
self.open_ai_dimensions = open_ai_dimensions

===========changed ref 0===========
<s>Strategy,
        blob_manager: BlobManager,
        search_info: SearchInfo,
+       embeddings: AzureOpenAIEmbeddingService,
-       embeddings: Optional[AzureOpenAIEmbeddingService],
        subscription_id: str,
        search_service_user_assigned_id: str,
        document_action: DocumentAction = DocumentAction.Add,
        search_analyzer_name: Optional[str] = None,
        use_acls: bool = False,
        category: Optional[str] = None,
    ):
-       if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):
-           raise Exception("Expecting AzureOpenAI embedding service")
-
        self.list_file_strategy = list_file_strategy
        self.blob_manager = blob_manager
        self.document_action = document_action
        self.embeddings = embeddings
        self.subscription_id = subscription_id
        self.search_user_assigned_identity = search_service_user_assigned_id
        self.search_analyzer_name = search_analyzer_name
        self.use_acls = use_acls
        self.category = category
        self.search_info = search_info

===========changed ref 1===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
+       logger.info("Checking whether search index %s exists...", self.search_info.index_name)
-       logger.info("Ensuring search index %s exists", self.search_info.index_name)
        async with self.search_info.create_search_index_client() as search_index_client:
+
+           if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:
+               logger.info("Creating new search index %s", self.search_info.index_name)
+               fields = [
-           fields = [
+                   (
-               (
+                       SimpleField(name="id", type="Edm.String", key=True)
-                   SimpleField(name="id", type="Edm.String", key=True)
+                       if not self.use_int_vectorization
-                   if not self.use_int_vectorization
+                       else SearchField(
-                   else SearchField(
+                           name="id",
-                       name="id",
+                           type="Edm.String",
+                           key=True,
+                           sortable=True,
+                           filterable=True,
+                           facetable=True,
+                           analyzer_name="keyword",
+                       )
+                   ),
+                   SearchableField(
+                       name="content",
                        type="Edm.String",
-                       key=True,
-                       sortable=True,
-                       filterable=True,
-                       facetable=True,
+                       analyzer_name=self.search_analyzer_name,
-                       analyzer_name="keyword",
+                   ),
-                   )
-               ),
-               SearchableField(
-                   name="content",
-                   type="Edm.String",
-                   analyzer_name=self.search_analyzer_name,
-               ),
-               SearchField(
-                   name="embedding",
-                   type=SearchFieldDataType.Collection</s>

===========changed ref 2===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
# offset: 1
<s>name,
-               ),
-               SearchField(
-                   name="embedding",
-                   type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
-                   hidden=False,
-                   searchable=True,
-                   filterable=False,
-                   sortable=False,
-                   facetable=False,
-                   vector_search_dimensions=self.embedding_dimensions,
-                   vector_search_profile_name="embedding_config",
-               ),
-               SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
-               SimpleField(
-                   name="sourcepage",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=True,
-               ),
-               SimpleField(
-                   name="sourcefile",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=True,
-               ),
-               SimpleField(
-                   name="storageUrl",
-                   type="Edm.String",
-                   filterable=True,
-                   facetable=False,
-               ),
-           ]
-           if self.use_acls:
-               fields.append(
-                   SimpleField(
-                       name="oids",
-                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
-                       filterable=True,
-                   )
-               )
-               fields.append(
-                   SimpleField(
-                       name="groups",
-                       type=SearchFieldDataType.Collection(SearchFieldDataType.String),
-                       filterable=True,
-                   )
-               )
-           if self.use_int_vectorization:
-               fields.append(SearchableField(name="parent_id", type="Edm.String", filterable</s>
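The ground truth above tracks azure-search-documents SDK renames (resource_uri to resource_url, deployment_id to deployment_name, SearchIndexerIndexProjections to SearchIndexerIndexProjection) and starts passing model_name/dimensions. A sketch with the new-style names; all values are placeholders and the exact package version is an assumption:

from azure.search.documents.indexes.models import (
    AzureOpenAIEmbeddingSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

embedding_skill = AzureOpenAIEmbeddingSkill(
    name="myindex-embedding-skill",
    context="/document/pages/*",
    resource_url="https://my-openai.openai.azure.com",  # was resource_uri
    deployment_name="embedding",                         # was deployment_id
    model_name="text-embedding-3-large",                 # newly passed alongside
    dimensions=1024,                                     # the embedding dimensions
    inputs=[InputFieldMappingEntry(name="text", source="/document/pages/*")],
    outputs=[OutputFieldMappingEntry(name="embedding", target_name="vector")],
)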
app.backend.prepdocslib.integratedvectorizerstrategy/IntegratedVectorizerStrategy.setup
Modified
Azure-Samples~azure-search-openai-demo
31f501a16244fdf65eafe117931cb3696b9d5255
Updates to integrated vectorization (#2045)
<0>:<add> logger.info("Setting up search index using integrated vectorization...") <9>:<del> if self.embeddings is None: <10>:<del> raise ValueError("Expecting Azure Open AI instance") <11>:<add> await search_manager.create_index() <12>:<del> await search_manager.create_index( <13>:<del> vectorizers=[ <14>:<del> AzureOpenAIVectorizer( <15>:<del> name=f"{self.search_info.index_name}-vectorizer", <16>:<del> kind="azureOpenAI", <17>:<del> azure_open_ai_parameters=AzureOpenAIParameters( <18>:<del> resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com", <19>:<del> deployment_id=self.embeddings.open_ai_deployment, <20>:<del> ), <21>:<del> ), <22>:<del> ] <23>:<del> ) <24>:<del> <25>:<del> # create indexer client <30>:<add> type=SearchIndexerDataSourceType.AZURE_BLOB, <del> type="azureblob", <37>:<del> logger.info("Search indexer data source connection updated.")
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def setup(self):
<0> search_manager = SearchManager(
<1>     search_info=self.search_info,
<2>     search_analyzer_name=self.search_analyzer_name,
<3>     use_acls=self.use_acls,
<4>     use_int_vectorization=True,
<5>     embeddings=self.embeddings,
<6>     search_images=False,
<7> )
<8>
<9> if self.embeddings is None:
<10>     raise ValueError("Expecting Azure Open AI instance")
<11>
<12> await search_manager.create_index(
<13>     vectorizers=[
<14>         AzureOpenAIVectorizer(
<15>             name=f"{self.search_info.index_name}-vectorizer",
<16>             kind="azureOpenAI",
<17>             azure_open_ai_parameters=AzureOpenAIParameters(
<18>                 resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com",
<19>                 deployment_id=self.embeddings.open_ai_deployment,
<20>             ),
<21>         ),
<22>     ]
<23> )
<24>
<25> # create indexer client
<26> ds_client = self.search_info.create_search_indexer_client()
<27> ds_container = SearchIndexerDataContainer(name=self.blob_manager.container)
<28> data_source_connection = SearchIndexerDataSourceConnection(
<29>     name=f"{self.search_info.index_name}-blob",
<30>     type="azureblob",
<31>     connection_string=self.blob_manager.get_managedidentity_connectionstring(),
<32>     container=ds_container,
<33>     data_deletion_detection_policy=NativeBlobSoftDeleteDeletionDetectionPolicy(),
<34> )
<35>
<36> await ds_client.create_or_update_data_source_connection(data_source_connection)
<37> logger.info("Search indexer data source connection updated.")
<38>
<39> embedding_skillset = await self.create_embedding_skill(self.search_info.index_name</s>
===========below chunk 0===========
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def setup(self):
# offset: 1
        await ds_client.create_or_update_skillset(embedding_skillset)
        await ds_client.close()

===========changed ref 0===========
<s>Strategy,
        blob_manager: BlobManager,
        search_info: SearchInfo,
+       embeddings: AzureOpenAIEmbeddingService,
-       embeddings: Optional[AzureOpenAIEmbeddingService],
        subscription_id: str,
        search_service_user_assigned_id: str,
        document_action: DocumentAction = DocumentAction.Add,
        search_analyzer_name: Optional[str] = None,
        use_acls: bool = False,
        category: Optional[str] = None,
    ):
-       if not embeddings or not isinstance(embeddings, AzureOpenAIEmbeddingService):
-           raise Exception("Expecting AzureOpenAI embedding service")
-
        self.list_file_strategy = list_file_strategy
        self.blob_manager = blob_manager
        self.document_action = document_action
        self.embeddings = embeddings
        self.subscription_id = subscription_id
        self.search_user_assigned_identity = search_service_user_assigned_id
        self.search_analyzer_name = search_analyzer_name
        self.use_acls = use_acls
        self.category = category
        self.search_info = search_info

===========changed ref 1===========
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def create_embedding_skill(self, index_name: str):
        skillset_name = f"{index_name}-skillset"

        split_skill = SplitSkill(
+           name=f"{index_name}-split-skill",
            description="Split skill to chunk documents",
            text_split_mode="pages",
            context="/document",
            maximum_page_length=2048,
            page_overlap_length=20,
            inputs=[
                InputFieldMappingEntry(name="text", source="/document/content"),
            ],
            outputs=[OutputFieldMappingEntry(name="textItems", target_name="pages")],
        )

-       if self.embeddings is None:
-           raise ValueError("Expecting Azure Open AI instance")
-
        embedding_skill = AzureOpenAIEmbeddingSkill(
+           name=f"{index_name}-embedding-skill",
            description="Skill to generate embeddings via Azure OpenAI",
            context="/document/pages/*",
+           resource_url=f"https://{self.embeddings.open_ai_service}.openai.azure.com",
-           resource_uri=f"https://{self.embeddings.open_ai_service}.openai.azure.com",
+           deployment_name=self.embeddings.open_ai_deployment,
-           deployment_id=self.embeddings.open_ai_deployment,
+           model_name=self.embeddings.open_ai_model_name,
+           dimensions=self.embeddings.open_ai_dimensions,
            inputs=[
                InputFieldMappingEntry(name="text", source="/document/pages/*"),
            ],
            outputs=[OutputFieldMappingEntry(name="embedding", target_name="vector")],
        )

+       index_projection = SearchIndexerIndexProjection(
-       index_projections = SearchIndexerIndexProjections(
            selectors=[
                SearchIndexerIndexProjectionSelector(
                    target_index_name=index_name,
                    parent_key_field_name</s>

===========changed ref 2===========
# module: app.backend.prepdocslib.integratedvectorizerstrategy
class IntegratedVectorizerStrategy(Strategy):
    def create_embedding_skill(self, index_name: str):
# offset: 1
<s>=[
                SearchIndexerIndexProjectionSelector(
                    target_index_name=index_name,
                    parent_key_field_name="parent_id",
                    source_context="/document/pages/*",
                    mappings=[
                        InputFieldMappingEntry(name="content", source="/document/pages/*"),
                        InputFieldMappingEntry(name="embedding", source="/document/pages/*/vector"),
                        InputFieldMappingEntry(name="sourcepage", source="/document/metadata_storage_name"),
                    ],
                ),
            ],
            parameters=SearchIndexerIndexProjectionsParameters(
                projection_mode=IndexProjectionMode.SKIP_INDEXING_PARENT_DOCUMENTS
            ),
        )

        skillset = SearchIndexerSkillset(
            name=skillset_name,
            description="Skillset to chunk documents and generate embeddings",
            skills=[split_skill, embedding_skill],
+           index_projection=index_projection,
-           index_projections=index_projections,
        )

        return skillset

===========changed ref 3===========
# module: app.backend.prepdocslib.searchmanager
class SearchManager:
    def create_index(self, vectorizers: Optional[List[VectorSearchVectorizer]] = None):
+       logger.info("Checking whether search index %s exists...", self.search_info.index_name)
-       logger.info("Ensuring search index %s exists", self.search_info.index_name)
        async with self.search_info.create_search_index_client() as search_index_client:
+
+           if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:
+               logger.info("Creating new search index %s", self.search_info.index_name)
+               fields = [
-           fields = [
+                   (
-               (
+                       SimpleField(name="id", type="Edm.String", key=True)
-                   SimpleField(name="id", type="Edm.String", key=True)
+                       if not self.use_int_vectorization
-                   if not self.use_int_vectorization
+                       else SearchField(
-                   else SearchField(
+                           name="id",
-                       name="id",
+                           type="Edm.String",
+                           key=True,
+                           sortable=True,
+                           filterable=True,
+                           facetable=True,
+                           analyzer_name="keyword",
+                       )
+                   ),
+                   SearchableField(
+                       name="content",
                        type="Edm.String",
-                       key=True,
-                       sortable=True,
-                       filterable=True,
-                       facetable=True,
+                       analyzer_name=self.search_analyzer_name,
-                       analyzer_name="keyword",
+                   ),
-                   )
-               ),
-               SearchableField(
-                   name="content",
-                   type="Edm.String",
-                   analyzer_name=self.search_analyzer_name,
-               ),
-               SearchField(
-                   name="embedding",
-                   type=SearchFieldDataType.Collection</s>
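The setup diff also swaps the bare "azureblob" string for the SearchIndexerDataSourceType enum; a sketch with placeholder values (the class and enum names come straight from the diff):

from azure.search.documents.indexes.models import (
    SearchIndexerDataContainer,
    SearchIndexerDataSourceConnection,
    SearchIndexerDataSourceType,
)

data_source = SearchIndexerDataSourceConnection(
    name="myindex-blob",
    type=SearchIndexerDataSourceType.AZURE_BLOB,  # was the string "azureblob"
    connection_string="ResourceId=/subscriptions/.../storageAccounts/myaccount;",  # placeholder
    container=SearchIndexerDataContainer(name="content"),
)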
app.backend.prepdocslib.filestrategy/parse_file
Modified
Azure-Samples~azure-search-openai-demo
fa85034c85c213707da51e02861c758a82d4a74d
use lower() to match upper file extension name (#2069)
<0>:<add> key = file.file_extension().lower()
<del> key = file.file_extension()
# module: app.backend.prepdocslib.filestrategy
def parse_file(
    file: File,
    file_processors: dict[str, FileProcessor],
    category: Optional[str] = None,
    image_embeddings: Optional[ImageEmbeddings] = None,
) -> List[Section]:
<0> key = file.file_extension()
<1> processor = file_processors.get(key)
<2> if processor is None:
<3>     logger.info("Skipping '%s', no parser found.", file.filename())
<4>     return []
<5> logger.info("Ingesting '%s'", file.filename())
<6> pages = [page async for page in processor.parser.parse(content=file.content)]
<7> logger.info("Splitting '%s' into sections", file.filename())
<8> if image_embeddings:
<9>     logger.warning("Each page will be split into smaller chunks of text, but images will be of the entire page.")
<10> sections = [
<11>     Section(split_page, content=file, category=category) for split_page in processor.splitter.split_pages(pages)
<12> ]
<13> return sections
<14>
===========unchanged ref 0===========
at: app.backend.prepdocslib.filestrategy
logger = logging.getLogger("scripts")

at: logging.Logger
info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
warning(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None

at: prepdocslib.listfilestrategy.File
file_extension()

at: typing
List = _alias(list, 1, inst=False, name='List')
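The one-line fix above, isolated into a runnable sketch (hypothetical helper name; the repo's File.file_extension() plays the role of os.path.splitext here):

import os

def pick_processor(filename: str, file_processors: dict):
    # Normalize case so "REPORT.PDF" still matches the lowercase ".pdf" key.
    key = os.path.splitext(filename)[1].lower()
    return file_processors.get(key)

assert pick_processor("REPORT.PDF", {".pdf": "pdf-processor"}) == "pdf-processor"
assert pick_processor("notes.unknown", {".pdf": "pdf-processor"}) is None  # still skipped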
app.backend.approaches.approach/Approach.build_filter
Modified
Azure-Samples~azure-search-openai-demo
0946893fe904cab1e89de2a38c4421e38d508608
Feature: include category (#2021)
<0>:<add> include_category = overrides.get("include_category")
<3>:<add> if include_category:
<add>     filters.append("category eq '{}'".format(include_category.replace("'", "''")))
# module: app.backend.approaches.approach
class Approach(ABC):
    def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Optional[str]:
<0> exclude_category = overrides.get("exclude_category")
<1> security_filter = self.auth_helper.build_security_filters(overrides, auth_claims)
<2> filters = []
<3> if exclude_category:
<4>     filters.append("category ne '{}'".format(exclude_category.replace("'", "''")))
<5> if security_filter:
<6>     filters.append(security_filter)
<7> return None if len(filters) == 0 else " and ".join(filters)
<8>
===========unchanged ref 0===========
at: app.backend.approaches.approach.Approach
ALLOW_NON_GPT_MODELS = True

at: app.backend.approaches.approach.Approach.__init__
self.auth_helper = auth_helper

at: core.authentication.AuthenticationHelper
scope: str = "https://graph.microsoft.com/.default"
build_security_filters(overrides: dict[str, Any], auth_claims: dict[str, Any])

at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
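A standalone sketch of the OData filter built above, including the new include_category branch; note the '' escaping of single quotes inside category values:

def build_filter(overrides: dict):
    # Mirrors the logic above, minus the security filter.
    filters = []
    if (inc := overrides.get("include_category")):
        filters.append("category eq '{}'".format(inc.replace("'", "''")))
    if (exc := overrides.get("exclude_category")):
        filters.append("category ne '{}'".format(exc.replace("'", "''")))
    return " and ".join(filters) or None

assert build_filter({"include_category": "Jack's docs"}) == "category eq 'Jack''s docs'"
assert build_filter({}) is None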
app.backend.prepdocs/setup_embeddings_service
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<14>:<add> open_ai_api_version=openai_api_version,
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
<0> if disable_vectors:
<1>     logger.info("Not setting up embeddings service")
<2>     return None
<3>
<4> if openai_host != "openai":
<5>     azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
<6>         azure_credential if openai_key is None else AzureKeyCredential(openai_key)
<7>     )
<8>     return AzureOpenAIEmbeddingService(
<9>         open_ai_service=openai_service,
<10>         open_ai_custom_url=openai_custom_url,
<11>         open_ai_deployment=openai_deployment,
<12>         open_ai_model_name=openai_model_name,
<13>         open_ai_dimensions=openai_dimensions,
<14>         credential=azure_open_ai_credential,
<15>         disable_batch=disable_batch_vectors,
<16>     )
<17> else:
<18>     if openai_key is None:
<19>         raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
<20>     return OpenAIEmbeddingService(
<21>         open_ai_model_name=openai_model_name,
<22>         open_ai_dimensions=openai_dimensions,
<23>         credential=openai_key,
<24>         organization=openai_org,
<25>         disable_batch=disable_batch_vectors,
<26>     )
===========unchanged ref 0===========
at: app.backend.prepdocs
logger = logging.getLogger("scripts")

at: logging.Logger
info(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None

at: prepdocslib.embeddings
AzureOpenAIEmbeddingService(open_ai_service: Union[str, None], open_ai_deployment: Union[str, None], open_ai_model_name: str, open_ai_dimensions: int, open_ai_api_version: str, credential: Union[AsyncTokenCredential, AzureKeyCredential], open_ai_custom_url: Union[str, None]=None, disable_batch: bool=False)
OpenAIEmbeddingService(open_ai_model_name: str, open_ai_dimensions: int, credential: str, organization: Optional[str]=None, disable_batch: bool=False)
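A hedged sketch of the point of this change: threading one AZURE_OPENAI_API_VERSION value through to the embeddings client instead of pinning it per call site (AsyncAzureOpenAI is the openai package's Azure client; the wrapper function is hypothetical):

from openai import AsyncAzureOpenAI

def make_embeddings_client(service: str, api_version: str, api_key: str):
    # One consistently-sourced API version for all embedding calls.
    return AsyncAzureOpenAI(
        azure_endpoint=f"https://{service}.openai.azure.com",
        api_version=api_version,
        api_key=api_key,
    )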
tests.test_prepdocs/test_compute_embedding_success
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<28>:<add> open_ai_api_version="test-api-version",
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
<0> async def mock_create_client(*args, **kwargs):
<1>     # From https://platform.openai.com/docs/api-reference/embeddings/create
<2>     return MockClient(
<3>         embeddings_client=MockEmbeddingsClient(
<4>             create_embedding_response=openai.types.CreateEmbeddingResponse(
<5>                 object="list",
<6>                 data=[
<7>                     openai.types.Embedding(
<8>                         embedding=[
<9>                             0.0023064255,
<10>                             -0.009327292,
<11>                             -0.0028842222,
<12>                         ],
<13>                         index=0,
<14>                         object="embedding",
<15>                     )
<16>                 ],
<17>                 model="text-embedding-ada-002",
<18>                 usage=Usage(prompt_tokens=8, total_tokens=8),
<19>             )
<20>         )
<21>     )
<22>
<23> embeddings = AzureOpenAIEmbeddingService(
<24>     open_ai_service="x",
<25>     open_ai_deployment="x",
<26>     open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<27>     open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<28>     credential=MockAzureCredential(),
<29>     disable_batch=False,
<30> )
<31> monkeypatch.setattr(embeddings, "create_client", mock_create_client)
<32> assert await embeddings.create_embeddings(texts=["foo"]) == [
<33>     [
<34>         0.0023064255,
<35>         -0.009327292,
<36>         -0.0028842222,
<37>     ]
<38> ]
<39>
<40> embeddings = AzureOpenAIEmbeddingService(
<41>     open_ai_service="x",
<42>     open_ai_deployment="x",
<43>     open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<44>     open_ai</s>
===========below chunk 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 1
    credential=MockAzureCredential(),
    disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.monkeypatch
monkeypatch() -> Generator["MonkeyPatch", None, None]

at: tests.mocks
MOCK_EMBEDDING_DIMENSIONS = 1536
MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"

at: tests.test_prepdocs
MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse)
MockClient(embeddings_client)

===========changed ref 0===========
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
    if disable_vectors:
        logger.info("Not setting up embeddings service")
        return None

    if openai_host != "openai":
        azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
            azure_credential if openai_key is None else AzureKeyCredential(openai_key)
        )
        return AzureOpenAIEmbeddingService(
            open_ai_service=openai_service,
            open_ai_custom_url=openai_custom_url,
            open_ai_deployment=openai_deployment,
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
+           open_ai_api_version=openai_api_version,
            credential=azure_open_ai_credential,
            disable_batch=disable_batch_vectors,
        )
    else:
        if openai_key is None:
            raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
        return OpenAIEmbeddingService(
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
            credential=openai_key,
            organization=openai_org,
            disable_batch=disable_batch_vectors,
        )

===========changed ref 1===========
# module: app.backend.prepdocs
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.",
        epilog="Example: prepdocs.py '.\\data\*' -v",
    )
    parser.add_argument("files", nargs="?", help="Files to be processed")
    parser.add_argument(
        "--category", help="Value for the category field in the search index for all sections indexed in this run"
    )
    parser.add_argument(
        "--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage"
    )
    parser.add_argument(
        "--disablebatchvectors",
        action="store_true", help="Don't compute embeddings in batch for the sections"
    )
    parser.add_argument(
        "--remove",
        action="store_true",
        help="Remove references to this document from blob storage and the search index",
    )
    parser.add_argument(
        "--removeall",
        action="store_true",
        help="Remove all blobs from blob storage and documents from the search index",
    )
    # Optional key specification:
    parser.add_argument(
        "--searchkey",
        required=False,
        help="Optional. Use this Azure AI Search account key instead of the current user identity to login (use az login to set current user for Azure)",
    )
    parser.add_argument(
        "--storagekey",
        required=False,
        help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)",
    )
    parser.add_argument(
        "--datalakekey", required=False, help="Optional. Use this key when authenticating to Azure Data Lake Gen2"
    )
    parser.add_argument(
        "--documentintelligencekey",
        required=False,
        help="Optional. Use this Azure Document Intelligence account key instead of the current user identity to login (use az login to set current user</s>
tests.test_prepdocs/test_compute_embedding_ratelimiterror_batch
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<8>:<add> open_ai_api_version="test-api-version",
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
<0>     with caplog.at_level(logging.INFO):
<1>         monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
<2>         with pytest.raises(tenacity.RetryError):
<3>             embeddings = AzureOpenAIEmbeddingService(
<4>                 open_ai_service="x",
<5>                 open_ai_deployment="x",
<6>                 open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<7>                 open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<8>                 credential=MockAzureCredential(),
<9>                 disable_batch=False,
<10>             )
<11>             monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
<12>             await embeddings.create_embeddings(texts=["foo"])
<13>     assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14
<14>
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.python_api
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]"

at: logging
    INFO = 20

at: tenacity
    RetryError(last_attempt: "Future")

at: tenacity.wait
    wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)

at: tests.mocks
    MOCK_EMBEDDING_DIMENSIONS = 1536
    MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"

===========changed ref 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
    async def mock_create_client(*args, **kwargs):
        # From https://platform.openai.com/docs/api-reference/embeddings/create
        return MockClient(
            embeddings_client=MockEmbeddingsClient(
                create_embedding_response=openai.types.CreateEmbeddingResponse(
                    object="list",
                    data=[
                        openai.types.Embedding(
                            embedding=[
                                0.0023064255,
                                -0.009327292,
                                -0.0028842222,
                            ],
                            index=0,
                            object="embedding",
                        )
                    ],
                    model="text-embedding-ada-002",
                    usage=Usage(prompt_tokens=8, total_tokens=8),
                )
            )
        )

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="</s>

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 1
    <s>_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.00230

===========changed ref 2===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 2
    <s>255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

===========changed ref 3===========
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
    if disable_vectors:
        logger.info("Not setting up embeddings service")
        return None
    if openai_host != "openai":
        azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
            azure_credential if openai_key is None else AzureKeyCredential(openai_key)
        )
        return AzureOpenAIEmbeddingService(
            open_ai_service=openai_service,
            open_ai_custom_url=openai_custom_url,
            open_ai_deployment=openai_deployment,
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
+           open_ai_api_version=openai_api_version,
            credential=azure_open_ai_credential,
            disable_batch=disable_batch_vectors,
        )
    else:
        if openai_key is None:
            raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
        return OpenAIEmbeddingService(
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
            credential=openai_key,
            organization=openai_org,
            disable_batch=disable_batch_vectors,
        )
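The test above relies on zeroing out tenacity's exponential backoff so its fourteen retries complete instantly. A self-contained sketch of that trick (not the repo's code; the attempt count here is illustrative):

import tenacity

attempts = {"n": 0}

@tenacity.retry(
    wait=tenacity.wait_random_exponential(min=1, max=60),
    stop=tenacity.stop_after_attempt(3),
)
def flaky():
    attempts["n"] += 1
    raise RuntimeError("simulated rate limit")

# Equivalent of monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0):
tenacity.wait_random_exponential.__call__ = lambda self, retry_state: 0

try:
    flaky()
except tenacity.RetryError:
    print(f"gave up after {attempts['n']} attempts")  # -> gave up after 3 attempts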
tests.test_prepdocs/test_compute_embedding_ratelimiterror_single
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<8>:<add> open_ai_api_version="test-api-version",
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
<0>     with caplog.at_level(logging.INFO):
<1>         monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
<2>         with pytest.raises(tenacity.RetryError):
<3>             embeddings = AzureOpenAIEmbeddingService(
<4>                 open_ai_service="x",
<5>                 open_ai_deployment="x",
<6>                 open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<7>                 open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<8>                 credential=MockAzureCredential(),
<9>                 disable_batch=True,
<10>             )
<11>             monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
<12>             await embeddings.create_embeddings(texts=["foo"])
<13>     assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14
<14>
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.python_api
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]"

at: logging
    INFO = 20

at: tenacity
    RetryError(last_attempt: "Future")

at: tenacity.wait
    wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)

at: tests.mocks
    MOCK_EMBEDDING_DIMENSIONS = 1536
    MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"

===========changed ref 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=False,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
    async def mock_create_client(*args, **kwargs):
        # From https://platform.openai.com/docs/api-reference/embeddings/create
        return MockClient(
            embeddings_client=MockEmbeddingsClient(
                create_embedding_response=openai.types.CreateEmbeddingResponse(
                    object="list",
                    data=[
                        openai.types.Embedding(
                            embedding=[
                                0.0023064255,
                                -0.009327292,
                                -0.0028842222,
                            ],
                            index=0,
                            object="embedding",
                        )
                    ],
                    model="text-embedding-ada-002",
                    usage=Usage(prompt_tokens=8, total_tokens=8),
                )
            )
        )

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="</s>

===========changed ref 2===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 1
    <s>_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.00230

===========changed ref 3===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 2
    <s>255,
            -0.009327292,
            -0.0028842222,
        ]
    ]
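The only difference from the batch variant above is disable_batch=True, which switches create_embeddings from one batched request to one request per text. A sketch of the two call shapes against the openai>=1.x async client (not the repo's exact implementation):

async def embed(client, model: str, texts: list[str], disable_batch: bool) -> list[list[float]]:
    if disable_batch:
        # the "single" path: one embeddings request per input text
        results = []
        for text in texts:
            resp = await client.embeddings.create(model=model, input=text)
            results.append(resp.data[0].embedding)
        return results
    # the batch path: one embeddings request for all texts
    resp = await client.embeddings.create(model=model, input=texts)
    return [d.embedding for d in resp.data]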
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<7>:<add> open_ai_api_version="test-api-version", <19>:<add> open_ai_api_version="test-api-version",
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_autherror(monkeypatch, capsys):
<0>     monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
<1>     with pytest.raises(openai.AuthenticationError):
<2>         embeddings = AzureOpenAIEmbeddingService(
<3>             open_ai_service="x",
<4>             open_ai_deployment="x",
<5>             open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<6>             open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<7>             credential=MockAzureCredential(),
<8>             disable_batch=False,
<9>         )
<10>         monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
<11>         await embeddings.create_embeddings(texts=["foo"])
<12> 
<13>     with pytest.raises(openai.AuthenticationError):
<14>         embeddings = AzureOpenAIEmbeddingService(
<15>             open_ai_service="x",
<16>             open_ai_deployment="x",
<17>             open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<18>             open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<19>             credential=MockAzureCredential(),
<20>             disable_batch=True,
<21>         )
<22>         monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
<23>         await embeddings.create_embeddings(texts=["foo"])
<24>
===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.python_api
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E]
    raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]"

at: tenacity.wait
    wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0)

at: tests.mocks
    MOCK_EMBEDDING_DIMENSIONS = 1536
    MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"

at: tests.test_prepdocs
    MockClient(embeddings_client)
    AuthenticationErrorMockEmbeddingsClient()

===========changed ref 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=True,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=False,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 2===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
    async def mock_create_client(*args, **kwargs):
        # From https://platform.openai.com/docs/api-reference/embeddings/create
        return MockClient(
            embeddings_client=MockEmbeddingsClient(
                create_embedding_response=openai.types.CreateEmbeddingResponse(
                    object="list",
                    data=[
                        openai.types.Embedding(
                            embedding=[
                                0.0023064255,
                                -0.009327292,
                                -0.0028842222,
                            ],
                            index=0,
                            object="embedding",
                        )
                    ],
                    model="text-embedding-ada-002",
                    usage=Usage(prompt_tokens=8, total_tokens=8),
                )
            )
        )

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = AzureOpenAIEmbeddingService(
        open_ai_service="x",
        open_ai_deployment="x",
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="</s>

===========changed ref 3===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 1
    <s>_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+       open_ai_api_version="test-api-version",
        credential=MockAzureCredential(),
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=False,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.0023064255,
            -0.009327292,
            -0.0028842222,
        ]
    ]

    embeddings = OpenAIEmbeddingService(
        open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
        open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
        credential=MockAzureCredential(),
        organization="org",
        disable_batch=True,
    )
    monkeypatch.setattr(embeddings, "create_client", mock_create_client)
    assert await embeddings.create_embeddings(texts=["foo"]) == [
        [
            0.00230

===========changed ref 4===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_success(monkeypatch):
# offset: 2
    <s>255,
            -0.009327292,
            -0.0028842222,
        ]
    ]
tests.test_searchmanager/test_update_content_with_embeddings
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<34>:<add> open_ai_api_version="test-api-version",
# module: tests.test_searchmanager
@pytest.mark.asyncio
async def test_update_content_with_embeddings(monkeypatch, search_info):
<0>     async def mock_create_client(*args, **kwargs):
<1>         # From https://platform.openai.com/docs/api-reference/embeddings/create
<2>         return MockClient(
<3>             embeddings_client=MockEmbeddingsClient(
<4>                 create_embedding_response=openai.types.CreateEmbeddingResponse(
<5>                     object="list",
<6>                     data=[
<7>                         openai.types.Embedding(
<8>                             embedding=[
<9>                                 0.0023064255,
<10>                                 -0.009327292,
<11>                                 -0.0028842222,
<12>                             ],
<13>                             index=0,
<14>                             object="embedding",
<15>                         )
<16>                     ],
<17>                     model="text-embedding-ada-002",
<18>                     usage=Usage(prompt_tokens=8, total_tokens=8),
<19>                 )
<20>             )
<21>         )
<22> 
<23>     documents_uploaded = []
<24> 
<25>     async def mock_upload_documents(self, documents):
<26>         documents_uploaded.extend(documents)
<27> 
<28>     monkeypatch.setattr(SearchClient, "upload_documents", mock_upload_documents)
<29>     embeddings = AzureOpenAIEmbeddingService(
<30>         open_ai_service="x",
<31>         open_ai_deployment="x",
<32>         open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
<33>         open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
<34>         credential=AzureKeyCredential("test"),
<35>         disable_batch=True,
<36>     )
<37>     monkeypatch.setattr(embeddings, "create_client", mock_create_client)
<38>     manager = SearchManager(
<39>         search_info,
<40>         embeddings=embeddings,
<41>     )
<42> 
<43>     test_io = io.BytesIO(b"test content")
<44>     test_io.name = "test/foo.pdf</s>
===========below chunk 0===========
# module: tests.test_searchmanager
@pytest.mark.asyncio
async def test_update_content_with_embeddings(monkeypatch, search_info):
# offset: 1
    file = File(test_io)
    await manager.update_content(
        [
            Section(
                split_page=SplitPage(
                    page_num=0,
                    text="test content",
                ),
                content=file,
                category="test",
            )
        ]
    )
    assert len(documents_uploaded) == 1, "It should have uploaded one document"
    assert documents_uploaded[0]["embedding"] == [
        0.0023064255,
        -0.009327292,
        -0.0028842222,
    ]

===========unchanged ref 0===========
at: _pytest.mark.structures
    MARK_GEN = MarkGenerator(_ispytest=True)

at: _pytest.monkeypatch
    monkeypatch() -> Generator["MonkeyPatch", None, None]

at: io
    BytesIO(initial_bytes: bytes=...)

at: io.BytesIO
    name: Any

at: tests.mocks
    MOCK_EMBEDDING_DIMENSIONS = 1536
    MOCK_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
    MockEmbeddingsClient(create_embedding_response: openai.types.CreateEmbeddingResponse)
    MockClient(embeddings_client)

===========changed ref 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=True,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=False,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 2===========
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
    if disable_vectors:
        logger.info("Not setting up embeddings service")
        return None
    if openai_host != "openai":
        azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
            azure_credential if openai_key is None else AzureKeyCredential(openai_key)
        )
        return AzureOpenAIEmbeddingService(
            open_ai_service=openai_service,
            open_ai_custom_url=openai_custom_url,
            open_ai_deployment=openai_deployment,
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
+           open_ai_api_version=openai_api_version,
            credential=azure_open_ai_credential,
            disable_batch=disable_batch_vectors,
        )
    else:
        if openai_key is None:
            raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
        return OpenAIEmbeddingService(
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
            credential=openai_key,
            organization=openai_org,
            disable_batch=disable_batch_vectors,
        )

===========changed ref 3===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_autherror(monkeypatch, capsys):
    monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=False,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])

    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=True,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])
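The capture technique in this test, replacing an async SDK method with one that records its arguments, works for any client class. A framework-free sketch with illustrative names (FakeSearchClient is not the azure-search SDK):

import asyncio

class FakeSearchClient:
    async def upload_documents(self, documents):
        raise NotImplementedError  # the real method would hit the network

async def main():
    uploaded = []

    async def capture(self, documents):
        uploaded.extend(documents)

    # what monkeypatch.setattr(SearchClient, "upload_documents", ...) does
    FakeSearchClient.upload_documents = capture
    await FakeSearchClient().upload_documents([{"id": "1", "embedding": [0.1]}])
    assert uploaded[0]["id"] == "1"

asyncio.run(main())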
app.backend.prepdocslib.embeddings/AzureOpenAIEmbeddingService.__init__
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<9>:<add> self.open_ai_api_version = open_ai_api_version
<s>OpenAIEmbeddings):
    def __init__(
        self,
        open_ai_service: Union[str, None],
        open_ai_deployment: Union[str, None],
        open_ai_model_name: str,
        open_ai_dimensions: int,
+       open_ai_api_version: str,
        credential: Union[AsyncTokenCredential, AzureKeyCredential],
        open_ai_custom_url: Union[str, None] = None,
        disable_batch: bool = False,
    ):
<0>     super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)
<1>     self.open_ai_service = open_ai_service
<2>     if open_ai_service:
<3>         self.open_ai_endpoint = f"https://{open_ai_service}.openai.azure.com"
<4>     elif open_ai_custom_url:
<5>         self.open_ai_endpoint = open_ai_custom_url
<6>     else:
<7>         raise ValueError("Either open_ai_service or open_ai_custom_url must be provided")
<8>     self.open_ai_deployment = open_ai_deployment
<9>     self.credential = credential
===========unchanged ref 0===========
at: app.backend.prepdocslib.embeddings.OpenAIEmbeddings
    SUPPORTED_BATCH_AOAI_MODEL = {
        "text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16},
        "text-embedding-3-small": {"token_limit": 8100, "max_batch_size": 16},
        "text-embedding-3-large": {"token_limit": 8100, "max_batch_size": 16},
    }
    SUPPORTED_DIMENSIONS_MODEL = {
        "text-embedding-ada-002": False,
        "text-embedding-3-small": True,
        "text-embedding-3-large": True,
    }
    __init__(self, open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool=False)
    __init__(open_ai_model_name: str, open_ai_dimensions: int, disable_batch: bool=False)

===========changed ref 0===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=True,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=False,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 2===========
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
    if disable_vectors:
        logger.info("Not setting up embeddings service")
        return None
    if openai_host != "openai":
        azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
            azure_credential if openai_key is None else AzureKeyCredential(openai_key)
        )
        return AzureOpenAIEmbeddingService(
            open_ai_service=openai_service,
            open_ai_custom_url=openai_custom_url,
            open_ai_deployment=openai_deployment,
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
+           open_ai_api_version=openai_api_version,
            credential=azure_open_ai_credential,
            disable_batch=disable_batch_vectors,
        )
    else:
        if openai_key is None:
            raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
        return OpenAIEmbeddingService(
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
            credential=openai_key,
            organization=openai_org,
            disable_batch=disable_batch_vectors,
        )

===========changed ref 3===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_autherror(monkeypatch, capsys):
    monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=False,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])

    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=True,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])
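The endpoint selection in __init__ is worth isolating: a service name expands to the well-known Azure host, a custom URL is taken verbatim, and neither is an error. A standalone restatement for illustration:

from typing import Optional

def resolve_endpoint(service: Optional[str], custom_url: Optional[str]) -> str:
    if service:
        return f"https://{service}.openai.azure.com"
    if custom_url:
        return custom_url
    raise ValueError("Either open_ai_service or open_ai_custom_url must be provided")

assert resolve_endpoint("myaccount", None) == "https://myaccount.openai.azure.com"
assert resolve_endpoint(None, "https://example.com/openai") == "https://example.com/openai"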
app.backend.prepdocslib.embeddings/AzureOpenAIEmbeddingService.create_client
Modified
Azure-Samples~azure-search-openai-demo
fa9a63873fce8dadb6db479d7f04b4c6839c35b4
Upgrade Azure OpenAI API version and use AZURE_OPENAI_API_VERSION consistently (#2105)
<17>:<add> api_version=self.open_ai_api_version, <del> api_version="2023-05-15",
# module: app.backend.prepdocslib.embeddings
class AzureOpenAIEmbeddingService(OpenAIEmbeddings):
    def create_client(self) -> AsyncOpenAI:
<0>     class AuthArgs(TypedDict, total=False):
<1>         api_key: str
<2>         azure_ad_token_provider: Callable[[], Union[str, Awaitable[str]]]
<3> 
<4>     auth_args = AuthArgs()
<5>     if isinstance(self.credential, AzureKeyCredential):
<6>         auth_args["api_key"] = self.credential.key
<7>     elif isinstance(self.credential, AsyncTokenCredential):
<8>         auth_args["azure_ad_token_provider"] = get_bearer_token_provider(
<9>             self.credential, "https://cognitiveservices.azure.com/.default"
<10>         )
<11>     else:
<12>         raise TypeError("Invalid credential type")
<13> 
<14>     return AsyncAzureOpenAI(
<15>         azure_endpoint=self.open_ai_endpoint,
<16>         azure_deployment=self.open_ai_deployment,
<17>         api_version="2023-05-15",
<18>         **auth_args,
<19>     )
===========unchanged ref 0===========
at: app.backend.prepdocslib.embeddings.AzureOpenAIEmbeddingService.__init__
    self.open_ai_endpoint = f"https://{open_ai_service}.openai.azure.com"
    self.open_ai_endpoint = open_ai_custom_url
    self.open_ai_deployment = open_ai_deployment

at: app.backend.prepdocslib.embeddings.OpenAIEmbeddings
    create_client(self) -> AsyncOpenAI

at: typing
    Awaitable = _alias(collections.abc.Awaitable, 1)
    Callable = _CallableType(collections.abc.Callable, 2)

===========changed ref 0===========
<s>OpenAIEmbeddings):
    def __init__(
        self,
        open_ai_service: Union[str, None],
        open_ai_deployment: Union[str, None],
        open_ai_model_name: str,
        open_ai_dimensions: int,
+       open_ai_api_version: str,
        credential: Union[AsyncTokenCredential, AzureKeyCredential],
        open_ai_custom_url: Union[str, None] = None,
        disable_batch: bool = False,
    ):
        super().__init__(open_ai_model_name, open_ai_dimensions, disable_batch)
        self.open_ai_service = open_ai_service
        if open_ai_service:
            self.open_ai_endpoint = f"https://{open_ai_service}.openai.azure.com"
        elif open_ai_custom_url:
            self.open_ai_endpoint = open_ai_custom_url
        else:
            raise ValueError("Either open_ai_service or open_ai_custom_url must be provided")
        self.open_ai_deployment = open_ai_deployment
+       self.open_ai_api_version = open_ai_api_version
        self.credential = credential

===========changed ref 1===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_single(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=True,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 2===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_ratelimiterror_batch(monkeypatch, caplog):
    with caplog.at_level(logging.INFO):
        monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
        with pytest.raises(tenacity.RetryError):
            embeddings = AzureOpenAIEmbeddingService(
                open_ai_service="x",
                open_ai_deployment="x",
                open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
                open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+               open_ai_api_version="test-api-version",
                credential=MockAzureCredential(),
                disable_batch=False,
            )
            monkeypatch.setattr(embeddings, "create_client", create_rate_limit_client)
            await embeddings.create_embeddings(texts=["foo"])
        assert caplog.text.count("Rate limited on the OpenAI embeddings API") == 14

===========changed ref 3===========
<s>: str,
    openai_model_name: str,
    openai_service: Union[str, None],
    openai_custom_url: Union[str, None],
    openai_deployment: Union[str, None],
    openai_dimensions: int,
+   openai_api_version: str,
    openai_key: Union[str, None],
    openai_org: Union[str, None],
    disable_vectors: bool = False,
    disable_batch_vectors: bool = False,
):
    if disable_vectors:
        logger.info("Not setting up embeddings service")
        return None
    if openai_host != "openai":
        azure_open_ai_credential: Union[AsyncTokenCredential, AzureKeyCredential] = (
            azure_credential if openai_key is None else AzureKeyCredential(openai_key)
        )
        return AzureOpenAIEmbeddingService(
            open_ai_service=openai_service,
            open_ai_custom_url=openai_custom_url,
            open_ai_deployment=openai_deployment,
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
+           open_ai_api_version=openai_api_version,
            credential=azure_open_ai_credential,
            disable_batch=disable_batch_vectors,
        )
    else:
        if openai_key is None:
            raise ValueError("OpenAI key is required when using the non-Azure OpenAI API")
        return OpenAIEmbeddingService(
            open_ai_model_name=openai_model_name,
            open_ai_dimensions=openai_dimensions,
            credential=openai_key,
            organization=openai_org,
            disable_batch=disable_batch_vectors,
        )

===========changed ref 4===========
# module: tests.test_prepdocs
@pytest.mark.asyncio
async def test_compute_embedding_autherror(monkeypatch, capsys):
    monkeypatch.setattr(tenacity.wait_random_exponential, "__call__", lambda x, y: 0)
    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=False,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])

    with pytest.raises(openai.AuthenticationError):
        embeddings = AzureOpenAIEmbeddingService(
            open_ai_service="x",
            open_ai_deployment="x",
            open_ai_model_name=MOCK_EMBEDDING_MODEL_NAME,
            open_ai_dimensions=MOCK_EMBEDDING_DIMENSIONS,
+           open_ai_api_version="test-api-version",
            credential=MockAzureCredential(),
            disable_batch=True,
        )
        monkeypatch.setattr(embeddings, "create_client", create_auth_error_limit_client)
        await embeddings.create_embeddings(texts=["foo"])
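create_client's two auth paths map directly onto the AsyncAzureOpenAI constructor. A sketch of the token-provider path with placeholder endpoint, deployment, and version values:

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncAzureOpenAI

token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)
client = AsyncAzureOpenAI(
    azure_endpoint="https://myaccount.openai.azure.com",
    azure_deployment="embeddings",
    api_version="2024-06-01",  # after this commit, sourced from self.open_ai_api_version
    azure_ad_token_provider=token_provider,
)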
tests.e2e/test_chat_customization
Modified
Azure-Samples~azure-search-openai-demo
86b6dadc0209aa12116de6c7154f53fa46e38a6c
Refactor settings into a single component across Chat/Ask (#2111)
<3>:<add> assert overrides["temperature"] == 0.5 <add> assert overrides["seed"] == 123 <add> assert overrides["minimum_search_score"] == 0.5 <add> assert overrides["minimum_reranker_score"] == 0.5 <9>:<add> assert overrides["suggest_followup_questions"] is True <28>:<add> page.get_by_label("Temperature").click() <add> page.get_by_label("Temperature").fill("0.5") <add> page.get_by_label("Seed").click() <add> page.get_by_label("Seed").fill("123") <add> page.get_by_label("Minimum search score").click() <add> page.get_by_label("Minimum search score").fill("0.5") <add> page.get_by_label("Minimum reranker score").click() <add> page.get_by_label("Minimum reranker score").fill("0.5") <30>:<add>
# module: tests.e2e
def test_chat_customization(page: Page, live_server_url: str):
<0>     # Set up a mock route to the /chat endpoint
<1>     def handle(route: Route):
<2>         overrides = route.request.post_data_json["context"]["overrides"]
<3>         assert overrides["retrieval_mode"] == "vectors"
<4>         assert overrides["semantic_ranker"] is False
<5>         assert overrides["semantic_captions"] is True
<6>         assert overrides["top"] == 1
<7>         assert overrides["prompt_template"] == "You are a cat and only talk about tuna."
<8>         assert overrides["exclude_category"] == "dogs"
<9>         assert overrides["use_oid_security_filter"] is False
<10>         assert overrides["use_groups_security_filter"] is False
<11> 
<12>         # Read the JSON from our snapshot results and return as the response
<13>         f = open("tests/snapshots/test_app/test_chat_text/client0/result.json")
<14>         json = f.read()
<15>         f.close()
<16>         route.fulfill(body=json, status=200)
<17> 
<18>     page.route("*/**/chat", handle)
<19> 
<20>     # Check initial page state
<21>     page.goto(live_server_url)
<22>     expect(page).to_have_title("Azure OpenAI + AI Search")
<23> 
<24>     # Customize all the settings
<25>     page.get_by_role("button", name="Developer settings").click()
<26>     page.get_by_label("Override prompt template").click()
<27>     page.get_by_label("Override prompt template").fill("You are a cat and only talk about tuna.")
<28>     page.get_by_label("Retrieve this many search results:").click()
<29>     page.get_by_label("Retrieve this many search results:").fill("1")
<30>     page.get_by_label("Exclude category").click()
<31>     page.get_by_label("Exclude category").fill("dogs")
<32>     page.get_by_text("Use semantic captions").click()
<33> </s>
===========below chunk 0===========
# module: tests.e2e
def test_chat_customization(page: Page, live_server_url: str):
# offset: 1
    page.get_by_text("Vectors + Text (Hybrid)").click()
    page.get_by_role("option", name="Vectors", exact=True).click()
    page.get_by_text("Stream chat completion responses").click()
    page.locator("button").filter(has_text="Close").click()

    # Ask a question and wait for the message to appear
    page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").click()
    page.get_by_placeholder("Type a new question (e.g. does my plan cover annual eye exams?)").fill(
        "Whats the dental plan?"
    )
    page.get_by_role("button", name="Submit question").click()

    expect(page.get_by_text("Whats the dental plan?")).to_be_visible()
    expect(page.get_by_text("The capital of France is Paris.")).to_be_visible()
    expect(page.get_by_role("button", name="Clear chat")).to_be_enabled()

===========unchanged ref 0===========
at: io.BufferedReader
    close(self) -> None
    read(self, size: Optional[int]=..., /) -> bytes

at: typing.IO
    __slots__ = ()
    close() -> None
    read(n: int=...) -> AnyStr
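Every settings tweak added by this commit is the same click-then-fill Playwright idiom; a small helper capturing it (illustrative, not part of the test suite):

from playwright.sync_api import Page

def fill_labeled_field(page: Page, label: str, value: str) -> None:
    field = page.get_by_label(label)
    field.click()
    field.fill(value)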
app.backend.core.sessionhelper/create_session_id
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<0>:<add> if config_chat_history_cosmos_enabled: <add> return str(uuid.uuid4())
# module: app.backend.core.sessionhelper
+ def create_session_id(
+     config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool
+ ) -> Union[str, None]:
- def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]:
<0>     if config_chat_history_browser_enabled:
<1>         return str(uuid.uuid4())
<2>     return None
<3>
===========changed ref 0===========
+ # module: app.backend.chat_history.cosmosdb
+ 
+ 

===========changed ref 1===========
+ # module: app.backend.chat_history.cosmosdb
+ chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static")
+ 

===========changed ref 2===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.after_app_serving
+ async def close_clients():
+     if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT):
+         cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT]
+         await cosmos_client.close()
+ 

===========changed ref 3===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.delete("/chat_history/items/<item_id>")
+ @authenticated
+ async def delete_chat_history_session(auth_claims: Dict[str, Any], item_id: str):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         await container.delete_item(item=item_id, partition_key=entra_oid)
+         return jsonify({}), 204
+     except Exception as error:
+         return error_response(error, f"/chat_history/items/{item_id}")
+ 

===========changed ref 4===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.get("/chat_history/items/<item_id>")
+ @authenticated
+ async def get_chat_history_session(auth_claims: Dict[str, Any], item_id: str):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         res = await container.read_item(item=item_id, partition_key=entra_oid)
+         return (
+             jsonify(
+                 {
+                     "id": res.get("id"),
+                     "entra_oid": res.get("entra_oid"),
+                     "title": res.get("title", "untitled"),
+                     "timestamp": res.get("timestamp"),
+                     "answers": res.get("answers", []),
+                 }
+             ),
+             200,
+         )
+     except Exception as error:
+         return error_response(error, f"/chat_history/items/{item_id}")
+ 

===========changed ref 5===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.post("/chat_history")
+ @authenticated
+ async def post_chat_history(auth_claims: Dict[str, Any]):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         request_json = await request.get_json()
+         id = request_json.get("id")
+         answers = request_json.get("answers")
+         title = answers[0][0][:50] + "..." if len(answers[0][0]) > 50 else answers[0][0]
+         timestamp = int(time.time() * 1000)
+ 
+         await container.upsert_item(
+             {"id": id, "entra_oid": entra_oid, "title": title, "answers": answers, "timestamp": timestamp}
+         )
+ 
+         return jsonify({}), 201
+     except Exception as error:
+         return error_response(error, "/chat_history")
+ 

===========changed ref 6===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.before_app_serving
+ async def setup_clients():
+     USE_CHAT_HISTORY_COSMOS = os.getenv("USE_CHAT_HISTORY_COSMOS", "").lower() == "true"
+     AZURE_COSMOSDB_ACCOUNT = os.getenv("AZURE_COSMOSDB_ACCOUNT")
+     AZURE_CHAT_HISTORY_DATABASE = os.getenv("AZURE_CHAT_HISTORY_DATABASE")
+     AZURE_CHAT_HISTORY_CONTAINER = os.getenv("AZURE_CHAT_HISTORY_CONTAINER")
+ 
+     azure_credential: Union[AzureDeveloperCliCredential, ManagedIdentityCredential] = current_app.config[
+         CONFIG_CREDENTIAL
+     ]
+ 
+     if USE_CHAT_HISTORY_COSMOS:
+         current_app.logger.info("USE_CHAT_HISTORY_COSMOS is true, setting up CosmosDB client")
+         if not AZURE_COSMOSDB_ACCOUNT:
+             raise ValueError("AZURE_COSMOSDB_ACCOUNT must be set when USE_CHAT_HISTORY_COSMOS is true")
+         if not AZURE_CHAT_HISTORY_DATABASE:
+             raise ValueError("AZURE_CHAT_HISTORY_DATABASE must be set when USE_CHAT_HISTORY_COSMOS is true")
+         if not AZURE_CHAT_HISTORY_CONTAINER:
+             raise ValueError("AZURE_CHAT_HISTORY_CONTAINER must be set when USE_CHAT_HISTORY_COSMOS is true")
+         cosmos_client = CosmosClient(
+             url=f"https://{AZURE_COSMOSDB_ACCOUNT}.documents.azure.com:443/", credential=azure_credential
+         )
+         cosmos_db = cosmos_client.get_database_client(AZURE_CHAT_HISTORY_DATABASE)
+         cosmos_container = cosmos_db.get_container_client(AZURE_CHAT_HISTORY_CONTAINER)
+ </s>

===========changed ref 7===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.before_app_serving
+ async def setup_clients():
# offset: 1
<s>
<add>         cosmos_container = cosmos_db.get_container_client(AZURE_CHAT_HISTORY_CONTAINER)
+ 
+         current_app.config[CONFIG_COSMOS_HISTORY_CLIENT] = cosmos_client
+         current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER] = cosmos_container
+ 
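The new precedence in create_session_id is small but testable: Cosmos history wins, browser history is the fallback, and with both disabled there is no session id. A self-contained behavioral restatement for illustration (the real function lives in app/backend/core/sessionhelper.py):

import uuid
from typing import Union

def create_session_id(cosmos_enabled: bool, browser_enabled: bool) -> Union[str, None]:
    if cosmos_enabled:
        return str(uuid.uuid4())
    if browser_enabled:
        return str(uuid.uuid4())
    return None

assert create_session_id(False, False) is None
assert create_session_id(True, False) is not None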
app.backend.decorators/authenticated
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<5>:<add> async def auth_handler(*args, **kwargs): <del> async def auth_handler(): <12>:<add> return await route_fn(auth_claims, *args, **kwargs) <del> return await route_fn(auth_claims) <14>:<add> return cast(_C, auth_handler) <del> return auth_handler
# module: app.backend.decorators
+ def authenticated(route_fn: _C) -> _C:
- def authenticated(route_fn: Callable[[Dict[str, Any]], Any]):
<0>     """
<1>     Decorator for routes that might require access control. Unpacks Authorization header information into an auth_claims dictionary
<2>     """
<3> 
<4>     @wraps(route_fn)
<5>     async def auth_handler():
<6>         auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<7>         try:
<8>             auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers)
<9>         except AuthError:
<10>             abort(403)
<11> 
<12>         return await route_fn(auth_claims)
<13> 
<14>     return auth_handler
<15>
===========unchanged ref 0===========
at: config
    CONFIG_AUTH_CLIENT = "auth_client"

at: core.authentication
    AuthError(error, status_code)

at: functools
    wraps(wrapped: _AnyCallable, assigned: Sequence[str]=..., updated: Sequence[str]=...) -> Callable[[_T], _T]

at: typing
    TypeVar(name: str, *constraints: Type[Any], bound: Union[None, Type[Any], str]=..., covariant: bool=..., contravariant: bool=...)
    Callable = _CallableType(collections.abc.Callable, 2)

===========changed ref 0===========
+ # module: app.backend.chat_history.cosmosdb
+ 
+ 

===========changed ref 1===========
+ # module: app.backend.chat_history.cosmosdb
+ chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static")
+ 

===========changed ref 2===========
# module: app.backend.core.sessionhelper
+ def create_session_id(
+     config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool
+ ) -> Union[str, None]:
- def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]:
+     if config_chat_history_cosmos_enabled:
+         return str(uuid.uuid4())
    if config_chat_history_browser_enabled:
        return str(uuid.uuid4())
    return None

===========changed ref 3===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.after_app_serving
+ async def close_clients():
+     if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT):
+         cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT]
+         await cosmos_client.close()
+ 

===========changed ref 4===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.delete("/chat_history/items/<item_id>")
+ @authenticated
+ async def delete_chat_history_session(auth_claims: Dict[str, Any], item_id: str):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         await container.delete_item(item=item_id, partition_key=entra_oid)
+         return jsonify({}), 204
+     except Exception as error:
+         return error_response(error, f"/chat_history/items/{item_id}")
+ 

===========changed ref 5===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.get("/chat_history/items/<item_id>")
+ @authenticated
+ async def get_chat_history_session(auth_claims: Dict[str, Any], item_id: str):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         res = await container.read_item(item=item_id, partition_key=entra_oid)
+         return (
+             jsonify(
+                 {
+                     "id": res.get("id"),
+                     "entra_oid": res.get("entra_oid"),
+                     "title": res.get("title", "untitled"),
+                     "timestamp": res.get("timestamp"),
+                     "answers": res.get("answers", []),
+                 }
+             ),
+             200,
+         )
+     except Exception as error:
+         return error_response(error, f"/chat_history/items/{item_id}")
+ 

===========changed ref 6===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.post("/chat_history")
+ @authenticated
+ async def post_chat_history(auth_claims: Dict[str, Any]):
+     if not current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED]:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     container: ContainerProxy = current_app.config[CONFIG_COSMOS_HISTORY_CONTAINER]
+     if not container:
+         return jsonify({"error": "Chat history not enabled"}), 400
+ 
+     entra_oid = auth_claims.get("oid")
+     if not entra_oid:
+         return jsonify({"error": "User OID not found"}), 401
+ 
+     try:
+         request_json = await request.get_json()
+         id = request_json.get("id")
+         answers = request_json.get("answers")
+         title = answers[0][0][:50] + "..." if len(answers[0][0]) > 50 else answers[0][0]
+         timestamp = int(time.time() * 1000)
+ 
+         await container.upsert_item(
+             {"id": id, "entra_oid": entra_oid, "title": title, "answers": answers, "timestamp": timestamp}
+         )
+ 
+         return jsonify({}), 201
+     except Exception as error:
+         return error_response(error, "/chat_history")
+ 
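The decorator change exists because routes such as get_chat_history_session(auth_claims, item_id) now take URL parameters, so the wrapper must forward them; the TypeVar-plus-cast keeps the decorated function's type. A framework-free sketch of the same shape (header validation replaced by a stub):

from functools import wraps
from typing import Any, Callable, TypeVar, cast

_C = TypeVar("_C", bound=Callable[..., Any])

def authenticated(route_fn: _C) -> _C:
    @wraps(route_fn)
    async def auth_handler(*args: Any, **kwargs: Any):
        auth_claims = {"oid": "OID_X"}  # stand-in for real header validation
        return await route_fn(auth_claims, *args, **kwargs)

    return cast(_C, auth_handler)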
tests.conftest/auth_public_documents_client
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<11>:<add> monkeypatch.setenv("USE_CHAT_HISTORY_COSMOS", "true") <add> monkeypatch.setenv("AZURE_COSMOSDB_ACCOUNT", "test-cosmosdb-account") <add> monkeypatch.setenv("AZURE_CHAT_HISTORY_DATABASE", "test-cosmosdb-database") <add> monkeypatch.setenv("AZURE_CHAT_HISTORY_CONTAINER", "test-cosmosdb-container") <add>
# module: tests.conftest
@pytest_asyncio.fixture(params=auth_public_envs)
async def auth_public_documents_client(
    monkeypatch,
    mock_openai_chatcompletion,
    mock_openai_embedding,
    mock_confidential_client_success,
    mock_validate_token_success,
    mock_list_groups_success,
    mock_acs_search_filter,
    request,
):
<0>     monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
<1>     monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
<2>     monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
<3>     monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
<4>     monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
<5>     monkeypatch.setenv("USE_USER_UPLOAD", "true")
<6>     monkeypatch.setenv("AZURE_USERSTORAGE_ACCOUNT", "test-userstorage-account")
<7>     monkeypatch.setenv("AZURE_USERSTORAGE_CONTAINER", "test-userstorage-container")
<8>     monkeypatch.setenv("USE_LOCAL_PDF_PARSER", "true")
<9>     monkeypatch.setenv("USE_LOCAL_HTML_PARSER", "true")
<10>     monkeypatch.setenv("AZURE_DOCUMENTINTELLIGENCE_SERVICE", "test-documentintelligence-service")
<11>     for key, value in request.param.items():
<12>         monkeypatch.setenv(key, value)
<13> 
<14>     with mock.patch("app.AzureDeveloperCliCredential") as mock_default_azure_credential:
<15>         mock_default_azure_credential.return_value = MockAzureCredential()
<16>         quart_app = app.create_app()
<17> 
<18>     async with quart_app.test_app() as test_app:
<19>         quart_app.config.update({"TESTING</s>
===========below chunk 0=========== # module: tests.conftest @pytest_asyncio.fixture(params=auth_public_envs) async def auth_public_documents_client( monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_confidential_client_success, mock_validate_token_success, mock_list_groups_success, mock_acs_search_filter, request, ): # offset: 1 mock_openai_chatcompletion(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) mock_openai_embedding(test_app.app.config[app.CONFIG_OPENAI_CLIENT]) client = test_app.test_client() client.config = quart_app.config yield client ===========changed ref 0=========== + # module: tests.test_cosmosdb + + ===========changed ref 1=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 2=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 3=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 4=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 5=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 6=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 7=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 8=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.get( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 9=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def by_page(self, continuation_token=None): + if continuation_token: + self.continuation_token = continuation_token + "next" + else: + self.continuation_token = "next" + return self + ===========changed ref 10=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_disabled(client, monkeypatch): + response = await client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 11=========== + # module: tests.test_cosmosdb + # Error handling tests for getting an individual chat history item + @pytest.mark.asyncio + async def test_chathistory_getitem_error_disabled(client, monkeypatch): + response = await client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 12=========== # module: app.backend.core.sessionhelper + def create_session_id( + config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool + ) -> Union[str, None]: - def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]: + if config_chat_history_cosmos_enabled: + return str(uuid.uuid4()) if 
config_chat_history_browser_enabled:
        return str(uuid.uuid4())
    return None

===========changed ref 13===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.after_app_serving
+ async def close_clients():
+     if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT):
+         cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT]
+         await cosmos_client.close()
+

===========changed ref 14===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_newitem_error_entra(auth_public_documents_client, monkeypatch):
+     response = await auth_public_documents_client.post(
+         "/chat_history",
+         json={
+             "id": "123",
+             "answers": [["This is a test message"]],
+         },
+     )
+     assert response.status_code == 401
+

===========changed ref 15===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_query_error_entra(auth_public_documents_client, monkeypatch):
+     response = await auth_public_documents_client.post(
+         "/chat_history/items",
+         json={
+             "id": "123",
+             "answers": [["This is a test message"]],
+         },
+     )
+     assert response.status_code == 401
+

===========changed ref 16===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_newitem_error_disabled(client, monkeypatch):
+     response = await client.post(
+         "/chat_history",
+         headers={"Authorization": "Bearer MockToken"},
+         json={
+             "id": "123",
+             "answers": [["This is a test message"]],
+         },
+     )
+     assert response.status_code == 400
+

===========changed ref 17===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_query_error_disabled(client, monkeypatch):
+     response = await client.post(
+         "/chat_history/items",
+         headers={"Authorization": "Bearer MockToken"},
+         json={
+             "id": "123",
+             "answers": [["This is a test message"]],
+         },
+     )
+     assert response.status_code == 400
+

===========changed ref 18===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_deleteitem_error_container(auth_public_documents_client, monkeypatch):
+     auth_public_documents_client.app.config["cosmos_history_container"] = None
+     response = await auth_public_documents_client.delete(
+         "/chat_history/items/123",
+         headers={"Authorization": "Bearer MockToken"},
+     )
+     assert response.status_code == 400
+

===========changed ref 19===========
+ # module: tests.test_cosmosdb
+ @pytest.mark.asyncio
+ async def test_chathistory_getitem_error_container(auth_public_documents_client, monkeypatch):
+     auth_public_documents_client.app.config["cosmos_history_container"] = None
+     response = await auth_public_documents_client.get(
+         "/chat_history/items/123",
+         headers={"Authorization": "BearerMockToken"},
+     )
+     assert response.status_code == 400
+
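===========editor example===========
For orientation, here is a minimal, self-contained sketch of the fixture pattern used in this record: patch environment variables and the Azure credential before the app factory runs, then yield a test client. `create_app` and `AzureDeveloperCliCredential` come from the record above; `MockAzureCredential` and the fixture name are hypothetical stand-ins.

import pytest_asyncio
from unittest import mock

import app  # the backend module under test, as referenced in the record


class MockAzureCredential:  # hypothetical stand-in for the project's mock credential
    async def get_token(self, *scopes, **kwargs):
        return mock.Mock(token="mock-token", expires_on=9999999999)


@pytest_asyncio.fixture
async def minimal_client(monkeypatch):
    # Point the app at fake Azure resources so no network calls are made
    monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
    monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
    monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
    # Replace the real credential class before create_app() wires up clients
    with mock.patch("app.AzureDeveloperCliCredential") as mock_credential:
        mock_credential.return_value = MockAzureCredential()
        quart_app = app.create_app()
        async with quart_app.test_app() as test_app:
            quart_app.config.update({"TESTING": True})
            yield test_app.test_client()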
tests.test_app/test_chat_text_filter_public_documents
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<21>:<add> if result.get("session_state"): <add> del result["session_state"]
# module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter_public_documents(auth_public_documents_client, snapshot): <0> response = await auth_public_documents_client.post( <1> "/chat", <2> headers={"Authorization": "Bearer MockToken"}, <3> json={ <4> "messages": [{"content": "What is the capital of France?", "role": "user"}], <5> "context": { <6> "overrides": { <7> "retrieval_mode": "text", <8> "use_oid_security_filter": True, <9> "use_groups_security_filter": True, <10> "exclude_category": "excluded", <11> }, <12> }, <13> }, <14> ) <15> assert response.status_code == 200 <16> assert ( <17> auth_public_documents_client.config[app.CONFIG_SEARCH_CLIENT].filter <18> == "category ne 'excluded' and ((oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z'))) or (not oids/any() and not groups/any()))" <19> ) <20> result = await response.get_json() <21> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <22>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str ===========changed ref 0=========== + # module: tests.test_cosmosdb + + ===========changed ref 1=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 2=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 3=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 4=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 5=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 6=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 7=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 8=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.get( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 9=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def by_page(self, continuation_token=None): + if continuation_token: + self.continuation_token = continuation_token + "next" + else: + self.continuation_token = "next" + return self + ===========changed ref 10=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_disabled(client, monkeypatch): + response = await client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 11=========== + # module: tests.test_cosmosdb + # Error handling tests for getting an individual chat history item + @pytest.mark.asyncio + async def test_chathistory_getitem_error_disabled(client, monkeypatch): + response = await client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 12=========== # module: app.backend.core.sessionhelper + def create_session_id( + config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool + ) -> Union[str, None]: - def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]: + if config_chat_history_cosmos_enabled: + return str(uuid.uuid4()) if config_chat_history_browser_enabled: return str(uuid.uuid4()) return None ===========changed ref 13=========== + # module: 
app.backend.chat_history.cosmosdb + @chat_history_cosmosdb_bp.after_app_serving + async def close_clients(): + if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT): + cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT] + await cosmos_client.close() + ===========changed ref 14=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_newitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.post( + "/chat_history", + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 401 + ===========changed ref 15=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_query_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.post( + "/chat_history/items", + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 401 + ===========changed ref 16=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_newitem_error_disabled(client, monkeypatch): + response = await client.post( + "/chat_history", + headers={"Authorization": "Bearer MockToken"}, + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 400 + ===========changed ref 17=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_query_error_disabled(client, monkeypatch): + response = await client.post( + "/chat_history/items", + headers={"Authorization": "Bearer MockToken"}, + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 400 + ===========changed ref 18=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_container(auth_public_documents_client, monkeypatch): + auth_public_documents_client.app.config["cosmos_history_container"] = None + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 19=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_container(auth_public_documents_client, monkeypatch): + auth_public_documents_client.app.config["cosmos_history_container"] = None + response = await auth_public_documents_client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 +
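===========editor example===========
The OData filter asserted in this test is dense; the illustrative helper below (not part of the repo) shows how such a string is composed: an optional category exclusion AND-ed with an ACL clause that admits documents matching the caller's oid or groups, plus documents carrying no ACLs at all.

def build_security_filter(exclude_category, oids, groups):
    # Illustrative only -- mirrors the shape of the filter asserted above.
    oid_list = ", ".join(oids)
    group_list = ", ".join(groups)
    oid_clause = f"oids/any(g:search.in(g, '{oid_list}'))"
    group_clause = f"groups/any(g:search.in(g, '{group_list}'))"
    public_clause = "(not oids/any() and not groups/any())"
    acl_clause = f"(({oid_clause} or {group_clause}) or {public_clause})"
    parts = []
    if exclude_category:
        parts.append(f"category ne '{exclude_category}'")
    parts.append(acl_clause)
    return " and ".join(parts)

# Reproduces the exact string asserted in the test:
print(build_security_filter("excluded", ["OID_X"], ["GROUP_Y", "GROUP_Z"]))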
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<17>:<add> session_state = create_session_id( <add> current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], <add> current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], <del> session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) <18>:<add> )
# module: app.backend.app @bp.route("/chat", methods=["POST"]) @authenticated async def chat(auth_claims: Dict[str, Any]): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> context["auth_claims"] = auth_claims <5> try: <6> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <7> approach: Approach <8> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: <9> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) <10> else: <11> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) <12> <13> # If session state is provided, persists the session state, <14> # else creates a new session_id depending on the chat history options enabled. <15> session_state = request_json.get("session_state") <16> if session_state is None: <17> session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) <18> result = await approach.run( <19> request_json["messages"], <20> context=context, <21> session_state=session_state, <22> ) <23> return jsonify(result) <24> except Exception as error: <25> return error_response(error, "/chat") <26>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: approaches.approach Approach(search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: approaches.approach.Approach ALLOW_NON_GPT_MODELS = True run(messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> dict[str, Any] at: config CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_CHAT_HISTORY_BROWSER_ENABLED = "chat_history_browser_enabled" CONFIG_CHAT_HISTORY_COSMOS_ENABLED = "chat_history_cosmos_enabled" at: core.sessionhelper create_session_id(config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool) -> Union[str, None] at: decorators authenticated(route_fn: _C) -> _C at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== + # module: app.backend.chat_history + + ===========changed ref 1=========== + # module: tests.test_cosmosdb + + ===========changed ref 2=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 3=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 4=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 5=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 6=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 7=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 8=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 9=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.get( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 10=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def by_page(self, continuation_token=None): + if continuation_token: + self.continuation_token = continuation_token + "next" + else: + self.continuation_token = "next" + return self + ===========changed ref 11=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_disabled(client, monkeypatch): + response = await client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + 
===========changed ref 12=========== + # module: tests.test_cosmosdb + # Error handling tests for getting an individual chat history item + @pytest.mark.asyncio + async def test_chathistory_getitem_error_disabled(client, monkeypatch): + response = await client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 13=========== # module: app.backend.core.sessionhelper + def create_session_id( + config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool + ) -> Union[str, None]: - def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]: + if config_chat_history_cosmos_enabled: + return str(uuid.uuid4()) if config_chat_history_browser_enabled: return str(uuid.uuid4()) return None ===========changed ref 14=========== + # module: app.backend.chat_history.cosmosdb + @chat_history_cosmosdb_bp.after_app_serving + async def close_clients(): + if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT): + cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT] + await cosmos_client.close() + ===========changed ref 15=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_newitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.post( + "/chat_history", + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 401 + ===========changed ref 16=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_query_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.post( + "/chat_history/items", + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 401 + ===========changed ref 17=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_newitem_error_disabled(client, monkeypatch): + response = await client.post( + "/chat_history", + headers={"Authorization": "Bearer MockToken"}, + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 400 + ===========changed ref 18=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_query_error_disabled(client, monkeypatch): + response = await client.post( + "/chat_history/items", + headers={"Authorization": "Bearer MockToken"}, + json={ + "id": "123", + "answers": [["This is a test message"]], + }, + ) + assert response.status_code == 400 +
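===========editor example===========
Assembled from the diff fragments repeated throughout these records, the post-commit sessionhelper reads as follows: a session id is now minted when either history backend (Cosmos DB or browser) is enabled.

import uuid
from typing import Union


def create_session_id(
    config_chat_history_cosmos_enabled: bool,
    config_chat_history_browser_enabled: bool,
) -> Union[str, None]:
    if config_chat_history_cosmos_enabled:
        return str(uuid.uuid4())
    if config_chat_history_browser_enabled:
        return str(uuid.uuid4())
    return None


# No history backend enabled -> no session id is created
assert create_session_id(False, False) is None
# Either backend enabled -> a fresh UUID string
assert isinstance(create_session_id(True, False), str)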
app.backend.app/chat_stream
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<17>:<add> session_state = create_session_id( <add> current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], <add> current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], <del> session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) <18>:<add> )
# module: app.backend.app @bp.route("/chat/stream", methods=["POST"]) @authenticated async def chat_stream(auth_claims: Dict[str, Any]): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> context = request_json.get("context", {}) <4> context["auth_claims"] = auth_claims <5> try: <6> use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) <7> approach: Approach <8> if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: <9> approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) <10> else: <11> approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) <12> <13> # If session state is provided, persists the session state, <14> # else creates a new session_id depending on the chat history options enabled. <15> session_state = request_json.get("session_state") <16> if session_state is None: <17> session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) <18> result = await approach.run_stream( <19> request_json["messages"], <20> context=context, <21> session_state=session_state, <22> ) <23> response = await make_response(format_as_ndjson(result)) <24> response.timeout = None # type: ignore <25> response.mimetype = "application/json-lines" <26> return response <27> except Exception as error: <28> return error_response(error, "/chat") <29>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: app.backend.app.chat result = await approach.run( request_json["messages"], context=context, session_state=session_state, ) at: approaches.approach Approach(search_client: SearchClient, openai_client: AsyncOpenAI, auth_helper: AuthenticationHelper, query_language: Optional[str], query_speller: Optional[str], embedding_deployment: Optional[str], embedding_model: str, embedding_dimensions: int, openai_host: str, vision_endpoint: str, vision_token_provider: Callable[[], Awaitable[str]]) at: approaches.approach.Approach run_stream(messages: list[ChatCompletionMessageParam], session_state: Any=None, context: dict[str, Any]={}) -> AsyncGenerator[dict[str, Any], None] at: config CONFIG_CHAT_VISION_APPROACH = "chat_vision_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_CHAT_HISTORY_BROWSER_ENABLED = "chat_history_browser_enabled" CONFIG_CHAT_HISTORY_COSMOS_ENABLED = "chat_history_cosmos_enabled" at: core.sessionhelper create_session_id(config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool) -> Union[str, None] at: decorators authenticated(route_fn: _C) -> _C at: error error_response(error: Exception, route: str, status_code: int=500) at: typing cast(typ: Type[_T], val: Any) -> _T cast(typ: str, val: Any) -> Any cast(typ: object, val: Any) -> Any Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) @authenticated async def chat(auth_claims: Dict[str, Any]): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = auth_claims try: use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) # If session state is provided, persists the session state, # else creates a new session_id depending on the chat history options enabled. 
session_state = request_json.get("session_state") if session_state is None: + session_state = create_session_id( + current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], - session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) + ) result = await approach.run( request_json["messages"], context=context, session_state=session_state, ) return jsonify(result) except Exception as error: return error_response(error, "/chat") ===========changed ref 1=========== + # module: app.backend.chat_history + + ===========changed ref 2=========== + # module: tests.test_cosmosdb + + ===========changed ref 3=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 4=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 5=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 6=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 7=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 8=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 9=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 10=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.get( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 11=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def by_page(self, continuation_token=None): + if continuation_token: + self.continuation_token = continuation_token + "next" + else: + self.continuation_token = "next" + return self + ===========changed ref 12=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_disabled(client, monkeypatch): + response = await client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 13=========== + # module: tests.test_cosmosdb + # Error handling tests for getting an individual chat history item + @pytest.mark.asyncio + async def test_chathistory_getitem_error_disabled(client, monkeypatch): + response = await client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 14=========== # module: app.backend.core.sessionhelper + def create_session_id( + config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool + ) -> Union[str, None]: - def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]: + if config_chat_history_cosmos_enabled: + return 
str(uuid.uuid4())
    if config_chat_history_browser_enabled:
        return str(uuid.uuid4())
    return None

===========changed ref 15===========
+ # module: app.backend.chat_history.cosmosdb
+ @chat_history_cosmosdb_bp.after_app_serving
+ async def close_clients():
+     if current_app.config.get(CONFIG_COSMOS_HISTORY_CLIENT):
+         cosmos_client: CosmosClient = current_app.config[CONFIG_COSMOS_HISTORY_CLIENT]
+         await cosmos_client.close()
+
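===========editor example===========
`format_as_ndjson` is referenced in chat_stream but not shown in this record. A plausible minimal version, assuming the approach yields JSON-serializable dicts, serializes one object per line, which is what the `application/json-lines` mimetype advertises.

import json
from typing import AsyncGenerator


async def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # One JSON document per line: newline-delimited JSON ("NDJSON")
    async for event in r:
        yield json.dumps(event, ensure_ascii=False) + "\n"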
app.backend.app/config
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<11>:<add> "showChatHistoryCosmos": current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED],
# module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): <0> return jsonify( <1> { <2> "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], <3> "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], <4> "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], <5> "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], <6> "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED], <7> "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], <8> "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], <9> "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], <10> "showChatHistoryBrowser": current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], <11> } <12> ) <13>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: config CONFIG_AUTH_CLIENT = "auth_client" CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed" CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed" CONFIG_VECTOR_SEARCH_ENABLED = "vector_search_enabled" ===========changed ref 0=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) @authenticated async def chat(auth_claims: Dict[str, Any]): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = auth_claims try: use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) # If session state is provided, persists the session state, # else creates a new session_id depending on the chat history options enabled. session_state = request_json.get("session_state") if session_state is None: + session_state = create_session_id( + current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], - session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) + ) result = await approach.run( request_json["messages"], context=context, session_state=session_state, ) return jsonify(result) except Exception as error: return error_response(error, "/chat") ===========changed ref 1=========== # module: app.backend.app @bp.route("/chat/stream", methods=["POST"]) @authenticated async def chat_stream(auth_claims: Dict[str, Any]): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = auth_claims try: use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) # If session state is provided, persists the session state, # else creates a new session_id depending on the chat history options enabled. 
session_state = request_json.get("session_state") if session_state is None: + session_state = create_session_id( + current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], - session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) + ) result = await approach.run_stream( request_json["messages"], context=context, session_state=session_state, ) response = await make_response(format_as_ndjson(result)) response.timeout = None # type: ignore response.mimetype = "application/json-lines" return response except Exception as error: return error_response(error, "/chat") ===========changed ref 2=========== + # module: app.backend.chat_history + + ===========changed ref 3=========== + # module: tests.test_cosmosdb + + ===========changed ref 4=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 5=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 6=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 7=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 8=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 9=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 10=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 11=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.get( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 12=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def by_page(self, continuation_token=None): + if continuation_token: + self.continuation_token = continuation_token + "next" + else: + self.continuation_token = "next" + return self + ===========changed ref 13=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_disabled(client, monkeypatch): + response = await client.delete( + "/chat_history/items/123", + headers={"Authorization": "Bearer MockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 14=========== + # module: tests.test_cosmosdb + # Error handling tests for getting an individual chat history item + @pytest.mark.asyncio + async def test_chathistory_getitem_error_disabled(client, monkeypatch): + response = await client.get( + "/chat_history/items/123", + headers={"Authorization": "BearerMockToken"}, + ) + assert response.status_code == 400 + ===========changed ref 15=========== # module: app.backend.core.sessionhelper + def create_session_id( + config_chat_history_cosmos_enabled: bool, config_chat_history_browser_enabled: bool + ) -> Union[str, 
None]: - def create_session_id(config_chat_history_browser_enabled: bool) -> Union[str, None]: + if config_chat_history_cosmos_enabled: + return str(uuid.uuid4()) if config_chat_history_browser_enabled: return str(uuid.uuid4()) return None
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
c3810e8faddb1303530df9581f68b41991e1b65a
Feature: Store chat history in Cosmos DB (#2063)
<2>:<add> app.register_blueprint(chat_history_cosmosdb_bp)
# module: app.backend.app def create_app(): <0> app = Quart(__name__) <1> app.register_blueprint(bp) <2> <3> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <4> app.logger.info("APPLICATIONINSIGHTS_CONNECTION_STRING is set, enabling Azure Monitor") <5> configure_azure_monitor() <6> # This tracks HTTP requests made by aiohttp: <7> AioHttpClientInstrumentor().instrument() <8> # This tracks HTTP requests made by httpx: <9> HTTPXClientInstrumentor().instrument() <10> # This tracks OpenAI SDK requests: <11> OpenAIInstrumentor().instrument() <12> # This middleware tracks app route requests: <13> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) # type: ignore[assignment] <14> <15> # Log levels should be one of https://docs.python.org/3/library/logging.html#logging-levels <16> # Set root level to WARNING to avoid seeing overly verbose logs from SDKS <17> logging.basicConfig(level=logging.WARNING) <18> # Set our own logger levels to INFO by default <19> app_level = os.getenv("APP_LOG_LEVEL", "INFO") <20> app.logger.setLevel(os.getenv("APP_LOG_LEVEL", app_level)) <21> logging.getLogger("scripts").setLevel(app_level) <22> <23> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <24> app.logger.info("ALLOWED_ORIGIN is set, enabling CORS for %s", allowed_origin) <25> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"]) <26> return app <27>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: app.backend.app.setup_clients AZURE_SEARCH_QUERY_LANGUAGE = os.getenv("AZURE_SEARCH_QUERY_LANGUAGE", "en-us") AZURE_SEARCH_QUERY_SPELLER = os.getenv("AZURE_SEARCH_QUERY_SPELLER", "lexicon") at: chat_history.cosmosdb chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") at: config CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_USER_BLOB_CONTAINER_CLIENT = "user_blob_container_client" CONFIG_SEARCH_CLIENT = "search_client" at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.app @bp.route("/config", methods=["GET"]) def config(): return jsonify( { "showGPT4VOptions": current_app.config[CONFIG_GPT4V_DEPLOYED], "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED], "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], "showChatHistoryBrowser": current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], + "showChatHistoryCosmos": current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], } ) ===========changed ref 1=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) @authenticated async def chat(auth_claims: Dict[str, Any]): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = auth_claims try: use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) # If session state is provided, persists the session state, # else creates a new session_id depending on the chat history options enabled. 
session_state = request_json.get("session_state") if session_state is None: + session_state = create_session_id( + current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], - session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) + ) result = await approach.run( request_json["messages"], context=context, session_state=session_state, ) return jsonify(result) except Exception as error: return error_response(error, "/chat") ===========changed ref 2=========== # module: app.backend.app @bp.route("/chat/stream", methods=["POST"]) @authenticated async def chat_stream(auth_claims: Dict[str, Any]): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() context = request_json.get("context", {}) context["auth_claims"] = auth_claims try: use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False) approach: Approach if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config: approach = cast(Approach, current_app.config[CONFIG_CHAT_VISION_APPROACH]) else: approach = cast(Approach, current_app.config[CONFIG_CHAT_APPROACH]) # If session state is provided, persists the session state, # else creates a new session_id depending on the chat history options enabled. session_state = request_json.get("session_state") if session_state is None: + session_state = create_session_id( + current_app.config[CONFIG_CHAT_HISTORY_COSMOS_ENABLED], + current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED], - session_state = create_session_id(current_app.config[CONFIG_CHAT_HISTORY_BROWSER_ENABLED]) + ) result = await approach.run_stream( request_json["messages"], context=context, session_state=session_state, ) response = await make_response(format_as_ndjson(result)) response.timeout = None # type: ignore response.mimetype = "application/json-lines" return response except Exception as error: return error_response(error, "/chat") ===========changed ref 3=========== + # module: app.backend.chat_history + + ===========changed ref 4=========== + # module: tests.test_cosmosdb + + ===========changed ref 5=========== + # module: app.backend.chat_history.cosmosdb + + ===========changed ref 6=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __aiter__(self): + return self + ===========changed ref 7=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def get_count(self): + return len(self.data) + ===========changed ref 8=========== # module: app.backend.decorators + _C = TypeVar("_C", bound=Callable[..., Any]) ===========changed ref 9=========== + # module: tests.test_cosmosdb + class MockCosmosDBResultsIterator: + def __anext__(self): + if not self.data: + raise StopAsyncIteration + return MockAsyncPageIterator(self.data.pop(0)) + ===========changed ref 10=========== + # module: app.backend.chat_history.cosmosdb + chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static") + ===========changed ref 11=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_deleteitem_error_entra(auth_public_documents_client, monkeypatch): + response = await auth_public_documents_client.delete( + "/chat_history/items/123", + ) + assert response.status_code == 401 + ===========changed ref 12=========== + # module: tests.test_cosmosdb + @pytest.mark.asyncio + async def test_chathistory_getitem_error_entra(auth_public_documents_client, monkeypatch): + 
response = await auth_public_documents_client.get(
+         "/chat_history/items/123",
+     )
+     assert response.status_code == 401
+
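===========editor example===========
The one-line change in create_app relies on standard Quart blueprint registration; a minimal standalone illustration follows (the route body is hypothetical, only the blueprint name and the registration call come from the diff):

from quart import Blueprint, Quart, jsonify

chat_history_cosmosdb_bp = Blueprint("chat_history_cosmos", __name__, static_folder="static")


@chat_history_cosmosdb_bp.route("/chat_history", methods=["POST"])
async def new_chat_history_item():
    # Placeholder body; the real handler persists items to Cosmos DB
    return jsonify({"error": "chat history not enabled"}), 400


app = Quart(__name__)
app.register_blueprint(chat_history_cosmosdb_bp)  # routes go live only after this call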
app.backend.load_azd_env/load_azd_env
Modified
Azure-Samples~azure-search-openai-demo
9e960ec671b7a08af6a6d322ba93e4ec492700bc
let env var overrule .dot env setting (#2136)
<11>:<add> loading_mode = os.getenv("LOADING_MODE_FOR_AZD_ENV_VARS") or "override" <add> if loading_mode == "no-override": <add> logger.info("Loading azd env from %s, but not overriding existing environment variables", env_file_path) <add> load_dotenv(env_file_path, override=False) <add> else: <add> logger.info("Loading azd env from %s, which may override existing environment variables", env_file_path) <del> logger.info(f"Loading azd env from {env_file_path}") <12>:<add> load_dotenv(env_file_path, override=True) <del> load_dotenv(env_file_path, override=True)
# module: app.backend.load_azd_env def load_azd_env(): <0> """Get path to current azd env file and load file using python-dotenv""" <1> result = subprocess.run("azd env list -o json", shell=True, capture_output=True, text=True) <2> if result.returncode != 0: <3> raise Exception("Error loading azd env") <4> env_json = json.loads(result.stdout) <5> env_file_path = None <6> for entry in env_json: <7> if entry["IsDefault"]: <8> env_file_path = entry["DotEnvPath"] <9> if not env_file_path: <10> raise Exception("No default azd env file found") <11> logger.info(f"Loading azd env from {env_file_path}") <12> load_dotenv(env_file_path, override=True) <13>
===========unchanged ref 0=========== at: json loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========unchanged ref 1=========== at: subprocess run(args: _CMD, bufsize: int=..., executable: Optional[AnyPath]=..., stdin: _FILE=..., stdout: _FILE=..., stderr: _FILE=..., preexec_fn: Callable[[], Any]=..., close_fds: bool=..., shell: bool=..., cwd: Optional[AnyPath]=..., env: Optional[_ENV]=..., universal_newlines: bool=..., startupinfo: Any=..., creationflags: int=..., restore_signals: bool=..., start_new_session: bool=..., pass_fds: Any=..., *, capture_output: bool=..., check: bool=..., encoding: Optional[str]=..., errors: Optional[str]=..., input: Optional[str]=..., text: Literal[True], timeout: Optional[float]=...) -> CompletedProcess[str] run(args: _CMD, bufsize: int=..., executable: Optional[AnyPath]=..., stdin: _FILE=..., stdout: _FILE=..., stderr: _FILE=..., preexec_fn: Callable[[], Any]=..., close_fds: bool=..., shell: bool=..., cwd: Optional[AnyPath]=..., env: Optional[_ENV]=..., universal_newlines: bool=..., startupinfo: Any=..., creationflags: int=..., restore_signals: bool=..., start_new_session: bool=..., pass_fds: Any=..., *, capture_output: bool=..., check: bool=..., encoding: str, errors: Optional[str]=..., input: Optional[str]=..., text: Optional[bool]=..., timeout: Optional[float]=...) -> CompletedProcess[str] run(args: _CMD, bufsize: int=..., executable: Optional[AnyPath]=..., stdin: _FILE=..., stdout: _FILE=..., stderr: _FILE=..., preexec_fn: Callable[[], Any]=..., close_fds: bool=..., shell: bool=..., cwd: Optional[AnyPath]=..., env: Optional[_ENV]=..., *</s> ===========unchanged ref 2=========== at: subprocess.CompletedProcess.__init__ self.returncode = returncode self.stdout = stdout
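===========editor example===========
The new `LOADING_MODE_FOR_AZD_ENV_VARS` switch maps directly onto python-dotenv's `override` flag. This standalone snippet (path and values illustrative, python-dotenv assumed installed) shows the difference:

import os

from dotenv import load_dotenv

os.environ["AZURE_SEARCH_INDEX"] = "from-shell"

# "no-override" mode -> load_dotenv(..., override=False): a value already in the
# process environment wins over the one in the .env file.
load_dotenv(".azure/myenv/.env", override=False)
assert os.environ["AZURE_SEARCH_INDEX"] == "from-shell"

# Default mode -> load_dotenv(..., override=True): the .env file value wins.
load_dotenv(".azure/myenv/.env", override=True)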
scrapling.parser/Adaptor.__init__
Modified
D4Vinci~Scrapling
a01cfe66dbd0b21c3f0606bb7a7a498d468264e0
Version 0.1.2
<s> Optional[str] = None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): <0> """The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements <1> with expressions in CSS, XPath, or with simply text. Check the docs for more info. <2> <3> Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not <4> inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs <5> not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`. <6> It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>` <7> <8> :param text: HTML body passed as text. <9> :param url: allows storing a URL with the html data for retrieving later. <10> :param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument. <11> :param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8` <12> :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls <13> libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion. <14> :param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority. <15> Don't use it unless you know what you are doing! <16> :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons <17> :param auto_match: Glob</s>
===========below chunk 0=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 1 priority over all auto-match related arguments/functions in the class. :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info. :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class. If empty, default values will be used. :param debug: Enable debug mode """ if root is None and not body and text is None: raise ValueError("Adaptor class needs text, body, or root arguments to work") if root is None: if text is None: if not body or not isinstance(body, bytes): raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}") body = body.replace(b"\x00", b"").strip() else: if not isinstance(text, str): raise TypeError(f"text argument must be of type str, got {text.__class__}") body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>" parser = html.HTMLParser( # https://lxml.de/api/lxml.etree.HTMLParser-class.html recover=True, remove_blank_text=True, remove_comments=(keep_comments is True), encoding=encoding, compact=True, huge_tree=huge_tree, default_doctype=True ) self._root = etree.fromstring(body, parser=parser, base_url=url) else: # All html types inherits from HtmlMixin so this</s> ===========below chunk 1=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 2 <s>(body, parser=parser, base_url=url) else: # All html types inherits from HtmlMixin so this to check for all at once if not issubclass(type(root), html.HtmlMixin): raise TypeError( f"Root have to be a valid element of `html` module types to work, not of type {type(root)}" ) self._root = root setup_basic_logging(level='debug' if debug else 'info') self.__auto_match_enabled = auto_match if self.__auto_match_enabled: if not storage_args: storage_args = { 'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'), 'url': url } if not hasattr(storage, '__wrapped__'): raise ValueError("Storage class must be wrapped with cache decorator, see docs for info") if not issubclass(storage.__wrapped__, StorageSystemMixin): raise ValueError("Storage system must be inherited from class `StorageSystemMixin`") self._storage = storage(**storage_args) self.__keep_comments = keep_comments self.__huge_tree_enabled = huge_tree self.encoding = encoding self.url = url # For selector stuff self.__attributes = None self.__text = None self.__tag = None self.__debug = debug ===========unchanged ref 0=========== at: functools._lru_cache_wrapper __wrapped__: Callable[..., _T] at: os.path join(a: StrPath, *paths: StrPath) -> str join(a: BytesPath, *paths: BytesPath) -> bytes dirname(p: _PathLike[AnyStr]) -> AnyStr dirname(p: AnyStr) -> AnyStr at: scrapling.parser.Adaptor.attrib self.__attributes = AttributesHandler(self._root.attrib) at: scrapling.parser.Adaptor.tag self.__tag = self._root.tag at: scrapling.parser.Adaptor.text self.__text = 
TextHandler(self._root.text)
    self.__text = TextHandler(fragment_root.text)

at: scrapling.storage_adaptors
    StorageSystemMixin(url: Union[str, None]=None)
    SQLiteStorageSystem(storage_file: str, url: Union[str, None]=None)
    _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T

at: scrapling.utils
    setup_basic_logging(level: str='debug')
    _lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T

at: typing
    Dict = _alias(dict, 2, inst=False, name='Dict')
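===========editor example===========
For orientation, the validation logic above admits three construction paths (text, body, or root). A short usage sketch, assuming scrapling is installed:

from scrapling.parser import Adaptor

html_doc = "<html><body><p>hello</p></body></html>"

# text and body are interchangeable inputs; url is only stored for later retrieval
page = Adaptor(text=html_doc, url="https://example.com")
same_page = Adaptor(body=html_doc.encode("utf8"), encoding="utf8")

# Omitting text, body, and root raises ValueError; a non-bytes body raises
# TypeError, per the checks in __init__ above.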
scrapling.parser/Adaptor.__get_correct_result
Modified
D4Vinci~Scrapling
a01cfe66dbd0b21c3f0606bb7a7a498d468264e0
Version 0.1.2
<8>:<add> keep_comments=True, # if the comments are already removed in initialization, no need to try to delete them in sub-elements <add> huge_tree=self.__huge_tree_enabled, debug=self.__debug <del> keep_comments=self.__keep_comments, huge_tree=self.__huge_tree_enabled, debug=self.__debug
# module: scrapling.parser class Adaptor(SelectorsGeneration): def __get_correct_result( self, element: Union[html.HtmlElement, etree._ElementUnicodeResult] ) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]: <0> """Used internally in all functions to convert results to type (Adaptor|Adaptors) when possible""" <1> if self._is_text_node(element): <2> # etree._ElementUnicodeResult basically inherit from `str` so it's fine <3> return TextHandler(str(element)) <4> else: <5> if issubclass(type(element), html.HtmlMixin): <6> return self.__class__( <7> root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled, <8> keep_comments=self.__keep_comments, huge_tree=self.__huge_tree_enabled, debug=self.__debug <9> ) <10> return element <11>
===========unchanged ref 0=========== at: scrapling.custom_types TextHandler(o: object=...) TextHandler(o: bytes, encoding: str=..., errors: str=...) at: scrapling.parser.Adaptor __slots__ = ( 'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug', '__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag', ) _is_text_node(element: Union[html.HtmlElement, etree._ElementUnicodeResult]) -> bool body = html_content at: scrapling.parser.Adaptor.__init__ self.__auto_match_enabled = auto_match self.__huge_tree_enabled = huge_tree self.encoding = encoding self.url = url self.__debug = debug ===========changed ref 0=========== <s> Optional[str] = None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): """The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements with expressions in CSS, XPath, or with simply text. Check the docs for more info. Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`. It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>` :param text: HTML body passed as text. :param url: allows storing a URL with the html data for retrieving later. :param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument. :param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8` :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion. :param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority. Don't use it unless you know what you are doing! :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher </s> ===========changed ref 1=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 1 <s> for obvious reasons :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher priority over all auto-match related arguments/functions in the class. :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info. :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class. If empty, default values will be used. 
:param debug: Enable debug mode """ if root is None and not body and text is None: raise ValueError("Adaptor class needs text, body, or root arguments to work") if root is None: if text is None: if not body or not isinstance(body, bytes): raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}") body = body.replace(b"\x00", b"").strip() else: if not isinstance(text, str): raise TypeError(f"text argument must be of type str, got {text.__class__}") body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>" parser = html.HTMLParser( # https://lxml.de/api/lxml.etree.HTMLParser-class.html + recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding, </s> ===========changed ref 2=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 2 <s> recover=True, remove_blank_text=True, remove_comments=(keep_comments is True), encoding=encoding, compact=True, huge_tree=huge_tree, default_doctype=True ) self._root = etree.fromstring(body, parser=parser, base_url=url) else: # All html types inherits from HtmlMixin so this to check for all at once if not issubclass(type(root), html.HtmlMixin): raise TypeError( f"Root have to be a valid element of `html` module types to work, not of type {type(root)}" ) self._root = root setup_basic_logging(level='debug' if debug else 'info') self.__auto_match_enabled = auto_match if self.__auto_match_enabled: if not storage_args: storage_args = { 'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'), 'url': url } if not hasattr(storage, '__wrapped__'): raise ValueError("Storage class must be wrapped with cache decorator, see docs for info") if not issubclass(storage.__wrapped__, StorageSystemMixin): raise ValueError("Storage system must be inherited from class `StorageSystemMixin`") self._storage = storage(**storage_args) self.__keep_comments = keep_comments self.__</s> ===========changed ref 3=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 3 <s>_tree_enabled = huge_tree self.encoding = encoding self.url = url # For selector stuff self.__attributes = None self.__text = None self.__tag = None self.__debug = debug
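===========editor example===========
The rationale for passing `keep_comments=True` downward can be seen with lxml directly: once the parser has stripped comments, sub-trees carry none, so re-running comment removal on wrapped sub-elements is wasted work. A small demonstration:

from lxml import etree, html

parser = html.HTMLParser(remove_comments=True)
root = etree.fromstring(b"<div><!-- note --><span>x</span></div>", parser=parser)

# Comments were dropped at parse time, so no sub-element of `root` carries one;
# an Adaptor wrapped around any sub-element has nothing left to strip.
assert root.xpath("//comment()") == []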
scrapling.parser/Adaptor.text
Modified
D4Vinci~Scrapling
a01cfe66dbd0b21c3f0606bb7a7a498d468264e0
Version 0.1.2
<2>:<add> if self.__keep_comments: <add> # If the user chose to keep comments, remove comments from text <add> # Work around lxml's default behaviour and remove comments like this: `<span>CONDITION: <!-- -->Excellent</span>` <add> # This issue is present in parsel/scrapy as well; no need to repeat it here, so the user can run regex on the full text. <add> code = self.html_content <add> parser = html.HTMLParser( <add> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding, <add> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True <add> ) <add> fragment_root = html.fragment_fromstring(code, parser=parser) <add> self.__text = TextHandler(fragment_root.text) <add> else: <add> self.__text = TextHandler(self._root.text) <del> self.__text = TextHandler(self._root.text)
# module: scrapling.parser class Adaptor(SelectorsGeneration): @property def text(self) -> TextHandler: <0> """Get text content of the element""" <1> if not self.__text: <2> self.__text = TextHandler(self._root.text) <3> return self.__text <4>
===========unchanged ref 0=========== at: scrapling.custom_types TextHandler(o: object=...) TextHandler(o: bytes, encoding: str=..., errors: str=...) at: scrapling.parser.Adaptor.__init__ self.__keep_comments = keep_comments self.__text = None at: scrapling.parser.Adaptor.text self.__text = TextHandler(self._root.text) self.__text = TextHandler(fragment_root.text) ===========changed ref 0=========== # module: scrapling.parser class Adaptor(SelectorsGeneration): def __get_correct_result( self, element: Union[html.HtmlElement, etree._ElementUnicodeResult] ) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]: """Used internally in all functions to convert results to type (Adaptor|Adaptors) when possible""" if self._is_text_node(element): # etree._ElementUnicodeResult basically inherit from `str` so it's fine return TextHandler(str(element)) else: if issubclass(type(element), html.HtmlMixin): return self.__class__( root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled, + keep_comments=True, # if the comments are already removed in initialization, no need to try to delete them in sub-elements + huge_tree=self.__huge_tree_enabled, debug=self.__debug - keep_comments=self.__keep_comments, huge_tree=self.__huge_tree_enabled, debug=self.__debug ) return element ===========changed ref 1=========== <s> Optional[str] = None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): """The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements with expressions in CSS, XPath, or with simply text. Check the docs for more info. Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`. It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>` :param text: HTML body passed as text. :param url: allows storing a URL with the html data for retrieving later. :param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument. :param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8` :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion. :param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority. Don't use it unless you know what you are doing! :param keep_comments: While parsing the HTML body, drop comments or not. 
Disabled by default for obvious reasons :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher </s> ===========changed ref 2=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 1 <s> for obvious reasons :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher priority over all auto-match related arguments/functions in the class. :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info. :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class. If empty, default values will be used. :param debug: Enable debug mode """ if root is None and not body and text is None: raise ValueError("Adaptor class needs text, body, or root arguments to work") if root is None: if text is None: if not body or not isinstance(body, bytes): raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}") body = body.replace(b"\x00", b"").strip() else: if not isinstance(text, str): raise TypeError(f"text argument must be of type str, got {text.__class__}") body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>" parser = html.HTMLParser( # https://lxml.de/api/lxml.etree.HTMLParser-class.html + recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding, </s> ===========changed ref 3=========== <s> None, url: Optional[str] = None, body: bytes = b"", encoding: str = "utf8", huge_tree: bool = True, root: Optional[html.HtmlElement] = None, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = False, storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True, ): # offset: 2 <s> recover=True, remove_blank_text=True, remove_comments=(keep_comments is True), encoding=encoding, compact=True, huge_tree=huge_tree, default_doctype=True ) self._root = etree.fromstring(body, parser=parser, base_url=url) else: # All html types inherits from HtmlMixin so this to check for all at once if not issubclass(type(root), html.HtmlMixin): raise TypeError( f"Root have to be a valid element of `html` module types to work, not of type {type(root)}" ) self._root = root setup_basic_logging(level='debug' if debug else 'info') self.__auto_match_enabled = auto_match if self.__auto_match_enabled: if not storage_args: storage_args = { 'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'), 'url': url } if not hasattr(storage, '__wrapped__'): raise ValueError("Storage class must be wrapped with cache decorator, see docs for info") if not issubclass(storage.__wrapped__, StorageSystemMixin): raise ValueError("Storage system must be inherited from class `StorageSystemMixin`") self._storage = storage(**storage_args) self.__keep_comments = keep_comments self.__</s>
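To see why the version above re-parses the fragment before reading `.text`, here is a standalone lxml sketch (illustrative only; the markup comes from the comment in the diff):

from lxml import html

markup = "<span>CONDITION: <!-- -->Excellent</span>"

# Default parse: the comment node splits the text, so `.text` stops early.
span = html.fragment_fromstring(markup)
print(span.text)  # "CONDITION: "

# Re-parsing with remove_comments=True merges the pieces back together.
parser = html.HTMLParser(remove_comments=True)
clean = html.fragment_fromstring(markup, parser=parser)
print(clean.text)  # "CONDITION: Excellent"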
scrapling.parser/Adaptor.text
Modified
D4Vinci~Scrapling
d546b3499b0d6e6f06d85bfb310b49b97ab89b00
Handling an edge case and adding more commentary
<3>:<add> if not self.children: <add> # If the user chose to keep comments, remove comments from text <del> # If the user chose to keep comments, remove comments from text <4>:<add> # Work around lxml's default behaviour and remove comments like this: `<span>CONDITION: <!-- -->Excellent</span>` <del> # Work around lxml's default behaviour and remove comments like this: `<span>CONDITION: <!-- -->Excellent</span>` <5>:<add> # This issue is present in parsel/scrapy as well; no need to repeat it here, so the user can run regex on the full text. <del> # This issue is present in parsel/scrapy as well; no need to repeat it here, so the user can run regex on the full text. <6>:<add> code = self.html_content <del> code = self.html_content <7>:<add> parser = html.HTMLParser( <del> parser = html.HTMLParser( <8>:<add> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding, <del> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding, <9>:<add> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True <del> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True <10>:<add> ) <del> ) <11>:<add> fragment_root = html.fragment_fromstring(code, parser=parser) <del> fragment_root = html.fragment_fromstring(code, parser=parser) <12>:<add> self.__text = TextHandler(fragment_root.text) <del> self.__text = TextHandler(fragment_root.text) <13>:<add> else: <add> self.__text = TextHandler(self._root.text) <14>:<add> # If user already ch
# module: scrapling.parser class Adaptor(SelectorsGeneration): @property def text(self) -> TextHandler: <0> """Get text content of the element""" <1> if not self.__text: <2> if self.__keep_comments: <3> # If the user chose to keep comments, remove comments from text <4> # Work around lxml's default behaviour and remove comments like this: `<span>CONDITION: <!-- -->Excellent</span>` <5> # This issue is present in parsel/scrapy as well; no need to repeat it here, so the user can run regex on the full text. <6> code = self.html_content <7> parser = html.HTMLParser( <8> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding, <9> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True <10> ) <11> fragment_root = html.fragment_fromstring(code, parser=parser) <12> self.__text = TextHandler(fragment_root.text) <13> else: <14> self.__text = TextHandler(self._root.text) <15> return self.__text <16>
===========unchanged ref 0=========== at: scrapling.custom_types TextHandler(o: object=...) TextHandler(o: bytes, encoding: str=..., errors: str=...) at: scrapling.parser.Adaptor __slots__ = ( 'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug', '__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag', ) body = html_content at: scrapling.parser.Adaptor.__init__ self._root = root self._root = etree.fromstring(body, parser=parser, base_url=url) self.__keep_comments = keep_comments self.__huge_tree_enabled = huge_tree self.encoding = encoding self.__text = None
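The `if not self.children` guard added in this commit matters because `fragment_root.text` only covers the text before the first child element; re-parsing a node that has children would silently drop content. A small lxml sketch of the hazard it prevents (illustrative only):

from lxml import html

# For an element with child elements, `.text` is just the leading text;
# the children and their tail text are not part of it.
root = html.fragment_fromstring("<div>intro <b>bold</b> tail</div>")
print(root.text)            # "intro "
print(root.text_content())  # "intro bold tail" -- what would otherwise be lost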
scrapling.engines.static/StaticEngine.__init__
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<3>:<add> self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
# module: scrapling.engines.static class StaticEngine: def __init__( self, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, + adaptor_arguments: Dict = None ): <0> self.timeout = timeout <1> self.follow_redirects = bool(follow_redirects) <2> self._extra_headers = generate_headers(browser_mode=False) <3>
===========changed ref 0=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 1=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 2=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 3=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 4=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 5=========== + # module: scrapling.core.custom_types + + ===========changed ref 6=========== + # module: scrapling.core.mixins + + ===========changed ref 7=========== + # module: scrapling.core.translator + + ===========changed ref 8=========== + # module: scrapling.core.utils + + ===========changed ref 9=========== + # module: scrapling.core + + ===========changed ref 10=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 11=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 12=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 13=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 14=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 17=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 18=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. 
+ """ + ===========changed ref 19=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 20=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 21=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 22=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 23=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 24=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 25=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 26=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 27=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 28=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 29=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 30=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 31=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 32=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub + ===========changed ref 33=========== + # module: scrapling.core.translator + class TranslatorMixin: + @staticmethod + def xpath_text_simple_pseudo_element(xpath: OriginalXPathExpr) -> XPathExpr: + """Support selecting text nodes using ::text pseudo-element""" + return XPathExpr.from_xpath(xpath, textnode=True) + ===========changed ref 34=========== + # module: scrapling.engines.toolbelt.navigation + def js_bypass_path(filename): + current_directory = os.path.dirname(__file__) + return os.path.join(current_directory, 'bypasses', filename) + ===========changed ref 
35=========== + # module: scrapling.core.storage_adaptors + class StorageSystemMixin(ABC): + # If you want to make your own storage system, you have to inherit from this + def __init__(self, url: Union[str, None] = None): + """ + :param url: URL of the website we are working on to separate it from other websites data + """ + self.url = url + ===========changed ref 36=========== + # module: scrapling.core.mixins + class SelectorsGeneration: + @property + def css_selector(self) -> str: + """Generate a CSS selector for the current element + :return: A string of the generated selector. + """ + return self.__general_selection() + ===========changed ref 37=========== + # module: scrapling.core.utils + def _is_iterable(s: Any): + # This will be used only in regex functions to make sure it's iterable but not string/bytes + return isinstance(s, (list, tuple,)) + ===========changed ref 38=========== + # module: scrapling.core.mixins + class SelectorsGeneration: + @property + def xpath_selector(self) -> str: + """Generate a XPath selector for the current element + :return: A string of the generated selector. + """ + return self.__general_selection('xpath') + ===========changed ref 39=========== + # module: scrapling.core.translator + class TranslatorMixin: + """This mixin adds support to CSS pseudo elements via dynamic dispatch. + + Currently supported pseudo-elements are ``::text`` and ``::attr(ATTR_NAME)``. + """ +
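A hedged sketch of constructing the engine with the new pass-through; the adaptor_arguments keys are assumptions based on Adaptor's own signature, not a documented contract:

from scrapling.engines.static import StaticEngine

# Options destined for Adaptor travel with the engine and are attached to
# every Response it builds via _prepare_response (see the next entries).
engine = StaticEngine(
    follow_redirects=True,
    timeout=10,
    adaptor_arguments={"keep_comments": False, "debug": False},  # assumed keys
)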
scrapling.engines.static/StaticEngine.get
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<2>:<add> return self._prepare_response(request) <del> return request.text
# module: scrapling.engines.static class StaticEngine: def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): <0> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) <1> request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) <2> return request.text <3>
===========changed ref 0=========== # module: scrapling.engines.static class StaticEngine: def __init__( self, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, + adaptor_arguments: Dict = None ): self.timeout = timeout self.follow_redirects = bool(follow_redirects) self._extra_headers = generate_headers(browser_mode=False) + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 1=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 2=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 3=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 5=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 6=========== + # module: scrapling.core.custom_types + + ===========changed ref 7=========== + # module: scrapling.core.mixins + + ===========changed ref 8=========== + # module: scrapling.core.translator + + ===========changed ref 9=========== + # module: scrapling.core.utils + + ===========changed ref 10=========== + # module: scrapling.core + + ===========changed ref 11=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 12=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 13=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 14=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 18=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 19=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. 
+ """ + ===========changed ref 20=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 21=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 22=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 23=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 24=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 25=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 26=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 27=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 28=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 29=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 30=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 31=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 32=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 33=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub + ===========changed ref 34=========== + # module: scrapling.core.translator + class TranslatorMixin: + @staticmethod + def xpath_text_simple_pseudo_element(xpath: OriginalXPathExpr) -> XPathExpr: + """Support selecting text nodes using ::text pseudo-element""" + return XPathExpr.from_xpath(xpath, textnode=True) + ===========changed ref 35=========== + # module: scrapling.engines.toolbelt.navigation + def js_bypass_path(filename): + current_directory = os.path.dirname(__file__) + return os.path.join(current_directory, 'bypasses', filename) + ===========changed ref 
36=========== + # module: scrapling.core.storage_adaptors + class StorageSystemMixin(ABC): + # If you want to make your own storage system, you have to inherit from this + def __init__(self, url: Union[str, None] = None): + """ + :param url: URL of the website we are working on to separate it from other websites data + """ + self.url = url + ===========changed ref 37=========== + # module: scrapling.core.mixins + class SelectorsGeneration: + @property + def css_selector(self) -> str: + """Generate a CSS selector for the current element + :return: A string of the generated selector. + """ + return self.__general_selection() + ===========changed ref 38=========== + # module: scrapling.core.utils + def _is_iterable(s: Any): + # This will be used only in regex functions to make sure it's iterable but not string/bytes + return isinstance(s, (list, tuple,)) +
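With _prepare_response in place, get() (and the sibling verbs in the next entries) returns the Response dataclass instead of a bare string. A usage sketch, with the URL invented:

from scrapling.engines.static import StaticEngine

engine = StaticEngine(timeout=10)
response = engine.get("https://example.com")  # a Response object now, not str

# Metadata that the old `request.text` return value threw away is preserved:
print(response.status, response.reason)       # e.g. 200 OK
print(response.headers.get("content-type"))
print(response.text[:80])                     # the body is still available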
scrapling.engines.static/StaticEngine.post
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<2>:<add> return self._prepare_response(request) <del> return request.text
# module: scrapling.engines.static class StaticEngine: def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): <0> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) <1> request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) <2> return request.text <3>
===========changed ref 0=========== # module: scrapling.engines.static class StaticEngine: def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 1=========== # module: scrapling.engines.static class StaticEngine: def __init__( self, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, + adaptor_arguments: Dict = None ): self.timeout = timeout self.follow_redirects = bool(follow_redirects) self._extra_headers = generate_headers(browser_mode=False) + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 2=========== # module: scrapling.engines.static class StaticEngine: + def _prepare_response(self, response: httpxResponse): + return Response( + url=str(response.url), + text=response.text, + content=response.content, + status=response.status_code, + reason=response.reason_phrase, + encoding=response.encoding or 'utf-8', + cookies=dict(response.cookies), + headers=dict(response.headers), + request_headers=response.request.headers, + adaptor_arguments=self.adaptor_arguments + ) + ===========changed ref 3=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 7=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 8=========== + # module: scrapling.core.custom_types + + ===========changed ref 9=========== + # module: scrapling.core.mixins + + ===========changed ref 10=========== + # module: scrapling.core.translator + + ===========changed ref 11=========== + # module: scrapling.core.utils + + ===========changed ref 12=========== + # module: scrapling.core + + ===========changed ref 13=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 14=========== + # module: scrapling.core.translator + # e.g. 
cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 20=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 21=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 22=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 23=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 24=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 25=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 26=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 27=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 28=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 29=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 30=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 31=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 32=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 33=========== + # 
module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 34=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 35=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub + ===========changed ref 36=========== + # module: scrapling.core.translator + class TranslatorMixin: + @staticmethod + def xpath_text_simple_pseudo_element(xpath: OriginalXPathExpr) -> XPathExpr: + """Support selecting text nodes using ::text pseudo-element""" + return XPathExpr.from_xpath(xpath, textnode=True) + ===========changed ref 37=========== + # module: scrapling.engines.toolbelt.navigation + def js_bypass_path(filename): + current_directory = os.path.dirname(__file__) + return os.path.join(current_directory, 'bypasses', filename) +
scrapling.engines.static/StaticEngine.delete
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<2>:<add> return self._prepare_response(request) <del> return request.text
# module: scrapling.engines.static class StaticEngine: def delete(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): <0> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) <1> request = httpx.delete(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) <2> return request.text <3>
===========changed ref 0=========== # module: scrapling.engines.static class StaticEngine: def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 1=========== # module: scrapling.engines.static class StaticEngine: def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 2=========== # module: scrapling.engines.static class StaticEngine: def __init__( self, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, + adaptor_arguments: Dict = None ): self.timeout = timeout self.follow_redirects = bool(follow_redirects) self._extra_headers = generate_headers(browser_mode=False) + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 3=========== # module: scrapling.engines.static class StaticEngine: + def _prepare_response(self, response: httpxResponse): + return Response( + url=str(response.url), + text=response.text, + content=response.content, + status=response.status_code, + reason=response.reason_phrase, + encoding=response.encoding or 'utf-8', + cookies=dict(response.cookies), + headers=dict(response.headers), + request_headers=response.request.headers, + adaptor_arguments=self.adaptor_arguments + ) + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 8=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 9=========== + # module: scrapling.core.custom_types + + ===========changed ref 10=========== + # module: scrapling.core.mixins + + ===========changed ref 11=========== + # module: scrapling.core.translator + + ===========changed ref 12=========== + # module: scrapling.core.utils + + ===========changed ref 13=========== + # module: scrapling.core + + ===========changed ref 14=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 15=========== + # module: scrapling.core.translator + # e.g. 
cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 20=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 21=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 22=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 23=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 24=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 25=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 26=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 27=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 28=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 29=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 30=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 31=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 32=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 33=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 34=========== + # 
module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 35=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 36=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub +
scrapling.engines.static/StaticEngine.put
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<2>:<add> return self._prepare_response(request) <del> return request.text
# module: scrapling.engines.static class StaticEngine: def put(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): <0> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) <1> request = httpx.put(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) <2> return request.text <3>
===========changed ref 0=========== # module: scrapling.engines.static class StaticEngine: def delete(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.delete(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 1=========== # module: scrapling.engines.static class StaticEngine: def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 2=========== # module: scrapling.engines.static class StaticEngine: def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict): headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers) request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs) + return self._prepare_response(request) - return request.text ===========changed ref 3=========== # module: scrapling.engines.static class StaticEngine: def __init__( self, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, + adaptor_arguments: Dict = None ): self.timeout = timeout self.follow_redirects = bool(follow_redirects) self._extra_headers = generate_headers(browser_mode=False) + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 4=========== # module: scrapling.engines.static class StaticEngine: + def _prepare_response(self, response: httpxResponse): + return Response( + url=str(response.url), + text=response.text, + content=response.content, + status=response.status_code, + reason=response.reason_phrase, + encoding=response.encoding or 'utf-8', + cookies=dict(response.cookies), + headers=dict(response.headers), + request_headers=response.request.headers, + adaptor_arguments=self.adaptor_arguments + ) + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 8=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 9=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 10=========== + # module: scrapling.core.custom_types + + ===========changed ref 11=========== + # module: scrapling.core.mixins + + ===========changed ref 12=========== + # module: scrapling.core.translator + + ===========changed ref 13=========== + # module: scrapling.core.utils + + ===========changed ref 14=========== + # module: scrapling.core + + ===========changed ref 15=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 16=========== + # module: scrapling.core.translator + # e.g. 
cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 20=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 21=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 22=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 23=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 24=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 25=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 26=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 27=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 28=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 29=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 30=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 31=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 32=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 33=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 34=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 35=========== + # 
module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) +
scrapling.engines.pw/PlaywrightEngine.__init__
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<12>:<add> self.page_action = do_nothing <del> self.page_action = _do_nothing <19>:<add> self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
<s> page_action: Callable = _do_nothing, wait_selector: Optional[str] = None, wait_selector_state: Optional[str] = 'attached', stealth: bool = False, hide_canvas: bool = True, disable_webgl: bool = False, cdp_url: Optional[str] = None, nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None, + adaptor_arguments: Dict = None ): <0> self.headless = headless <1> self.disable_resources = disable_resources <2> self.network_idle = bool(network_idle) <3> self.stealth = bool(stealth) <4> self.hide_canvas = bool(hide_canvas) <5> self.disable_webgl = bool(disable_webgl) <6> self.cdp_url = cdp_url <7> self.useragent = useragent <8> self.timeout = check_type_validity(timeout, [int, float], 30000) <9> if callable(page_action): <10> self.page_action = page_action <11> else: <12> self.page_action = _do_nothing <13> logging.error('[Ignored] Argument "page_action" must be callable') <14> <15> self.wait_selector = wait_selector <16> self.wait_selector_state = wait_selector_state <17> self.nstbrowser_mode = bool(nstbrowser_mode) <18> self.nstbrowser_config = nstbrowser_config <19>
===========unchanged ref 0=========== at: logging error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: scrapling.engines.pw _do_nothing(page) at: scrapling.engines.tools check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any at: typing Callable = _CallableType(collections.abc.Callable, 2) List = _alias(list, 1, inst=False, name='List') Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 1=========== # module: scrapling.engines.pw - # Disable loading these resources for speed - DEFAULT_DISABLED_RESOURCES = ['beacon', 'csp_report', 'font', 'image', 'imageset', 'media', 'object', 'texttrack', 'stylesheet', 'websocket'] - DEFAULT_STEALTH_FLAGS = [ - # Explanation: https://peter.sh/experiments/chromium-command-line-switches/ - # Generally this will make the browser faster and less detectable - '--incognito', '--accept-lang=en-US', '--lang=en-US', '--no-pings', '--mute-audio', '--no-first-run', '--no-default-browser-check', '--disable-cloud-import', - '--disable-gesture-typing', '--disable-offer-store-unmasked-wallet-cards', '--disable-offer-upload-credit-cards', '--disable-print-preview', '--disable-voice-input', - '--disable-wake-on-wifi', '--disable-cookie-encryption', '--ignore-gpu-blocklist', '--enable-async-dns', '--enable-simple-cache-backend', '--enable-tcp-fast-open', - '--prerender-from-omnibox=disabled', '--enable-web-bluetooth', '--disable-features=AudioServiceOutOfProcess,IsolateOrigins,site-per-process,TranslateUI,BlinkGenPropertyTrees', - '--aggressive-cache-discard', '--disable-ipc-flooding-protection', '--disable-blink-features=AutomationControlled', '--test-type', - '--enable-features=NetworkService,NetworkServiceInProcess,TrustTokens,TrustTokensAlwaysAllowIssuance', - '--disable-breakpad', '--disable-component-update', '--disable-domain-reliability', '--disable-sync', '--disable-client-side-phishing-detection', - '--disable-hang-monitor', '--disable-popup-blocking', '--disable-prompt-on-repost', '--metrics-recording-only', '--safebrowsing-disable-auto-update', '--password-store=basic', - '--</s> ===========changed ref 2=========== # module: scrapling.engines.pw # offset: 1 <s>metrics-recording-only', '--safebrowsing-disable-auto-update', '--password-store=basic', - '--autoplay-policy=no-user-gesture-required', '--use-mock-keychain', '--force-webrtc-ip-handling-policy=disable_non_proxied_udp', - '--webrtc-ip-handling-policy=disable_non_proxied_udp', '--disable-session-crashed-bubble', '--disable-crash-reporter', '--disable-dev-shm-usage', '--force-color-profile=srgb', - '--disable-translate', '--disable-background-networking', '--disable-background-timer-throttling', '--disable-backgrounding-occluded-windows', '--disable-infobars', - '--hide-scrollbars', '--disable-renderer-backgrounding', '--font-render-hinting=none', '--disable-logging', '--enable-surface-synchronization', - '--run-all-compositor-stages-before-draw', '--disable-threaded-animation', '--disable-threaded-scrolling', '--disable-checker-imaging', - '--disable-new-content-rendering-timeout', '--disable-image-animation-resync', '--disable-partial-raster', - '--blink-settings=primaryHoverType=2,availableHoverTypes=2,primaryPointerType=4,availablePointerTypes=4', - 
'--disable-layer-tree-host-memory-pressure', - '--window-position=0,0', - '--disable-features=site-per-process', - '--disable-default-apps', - '--disable-component-extensions-with-background-pages', - '--disable-extensions', - # "--disable-reading-from-canvas", # For Firefox - '--start-maximized' # For headless check bypass - ] - ===========changed ref 3=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 7=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 8=========== + # module: scrapling.core.custom_types + + ===========changed ref 9=========== + # module: scrapling.core.mixins + + ===========changed ref 10=========== + # module: scrapling.core.translator + + ===========changed ref 11=========== + # module: scrapling.core.utils + + ===========changed ref 12=========== + # module: scrapling.core + + ===========changed ref 13=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 14=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 20=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 21=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 22=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 23=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 24=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ +
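Since page_action must be callable (anything else is replaced by do_nothing with a logged error), here is a sketch of wiring in a custom action; the selectors and values are invented for illustration:

from scrapling.engines.pw import PlaywrightEngine

def accept_cookies(page):
    # Receives the Playwright page, mutates it, and hands it back.
    page.click("#accept-cookies")  # hypothetical selector
    return page

engine = PlaywrightEngine(
    headless=True,
    page_action=accept_cookies,      # callable: used as-is
    wait_selector="div#content",     # hypothetical selector
    wait_selector_state="attached",
)

# A non-callable would be ignored:
# PlaywrightEngine(page_action="not callable")  -> logs the error, falls back to do_nothing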
scrapling.engines.pw/PlaywrightEngine._cdp_url_logic
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<5>:<del> # Defaulting to the docker mode, token doesn't matter in it as it's passed for the container <6>:<del> query = { <7>:<del> "once": True, <8>:<del> "headless": True, <9>:<del> "autoClose": True, <10>:<del> "fingerprint": { <11>:<del> "flags": { <12>:<del> "timezone": "BasedOnIp", <13>:<del> "screen": "Custom" <14>:<del> }, <15>:<del> "platform": 'linux', # support: windows, mac, linux <16>:<del> "kernel": 'chromium', # only support: chromium <17>:<del> "kernelMilestone": '128', <18>:<del> "hardwareConcurrency": 8, <19>:<del> "deviceMemory": 8, <20>:<del> }, <21>:<del> } <22>:<add> query = NSTBROWSER_DEFAULT_QUERY.copy()
# module: scrapling.engines.pw class PlaywrightEngine: def _cdp_url_logic(self, flags: Optional[dict] = None): <0> cdp_url = self.cdp_url <1> if self.nstbrowser_mode: <2> if self.nstbrowser_config and type(self.nstbrowser_config) is Dict: <3> config = self.nstbrowser_config <4> else: <5> # Defaulting to the docker mode, token doesn't matter in it as it's passed for the container <6> query = { <7> "once": True, <8> "headless": True, <9> "autoClose": True, <10> "fingerprint": { <11> "flags": { <12> "timezone": "BasedOnIp", <13> "screen": "Custom" <14> }, <15> "platform": 'linux', # support: windows, mac, linux <16> "kernel": 'chromium', # only support: chromium <17> "kernelMilestone": '128', <18> "hardwareConcurrency": 8, <19> "deviceMemory": 8, <20> }, <21> } <22> if flags: <23> query.update({ <24> "args": dict(zip(flags, [''] * len(flags))), # browser args should be a dictionary <25> }) <26> <27> config = { <28> 'config': json.dumps(query), <29> # 'token': '' <30> } <31> cdp_url = construct_websocket_url(cdp_url, config) <32> <33> return cdp_url <34>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: scrapling.engines.pw.PlaywrightEngine.__init__ self.cdp_url = cdp_url self.nstbrowser_mode = bool(nstbrowser_mode) self.nstbrowser_config = nstbrowser_config at: scrapling.engines.tools construct_websocket_url(base_url, query_params) at: typing Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 1=========== <s> page_action: Callable = _do_nothing, wait_selector: Optional[str] = None, wait_selector_state: Optional[str] = 'attached', stealth: bool = False, hide_canvas: bool = True, disable_webgl: bool = False, cdp_url: Optional[str] = None, nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None, + adaptor_arguments: Dict = None ): self.headless = headless self.disable_resources = disable_resources self.network_idle = bool(network_idle) self.stealth = bool(stealth) self.hide_canvas = bool(hide_canvas) self.disable_webgl = bool(disable_webgl) self.cdp_url = cdp_url self.useragent = useragent self.timeout = check_type_validity(timeout, [int, float], 30000) if callable(page_action): self.page_action = page_action else: + self.page_action = do_nothing - self.page_action = _do_nothing logging.error('[Ignored] Argument "page_action" must be callable') self.wait_selector = wait_selector self.wait_selector_state = wait_selector_state self.nstbrowser_mode = bool(nstbrowser_mode) self.nstbrowser_config = nstbrowser_config + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 2=========== # module: scrapling.engines.pw - # Disable loading these resources for speed - DEFAULT_DISABLED_RESOURCES = ['beacon', 'csp_report', 'font', 'image', 'imageset', 'media', 'object', 'texttrack', 'stylesheet', 'websocket'] - DEFAULT_STEALTH_FLAGS = [ - # Explanation: https://peter.sh/experiments/chromium-command-line-switches/ - # Generally this will make the browser faster and less detectable - '--incognito', '--accept-lang=en-US', '--lang=en-US', '--no-pings', '--mute-audio', '--no-first-run', '--no-default-browser-check', '--disable-cloud-import', - '--disable-gesture-typing', '--disable-offer-store-unmasked-wallet-cards', '--disable-offer-upload-credit-cards', '--disable-print-preview', '--disable-voice-input', - '--disable-wake-on-wifi', '--disable-cookie-encryption', '--ignore-gpu-blocklist', '--enable-async-dns', '--enable-simple-cache-backend', '--enable-tcp-fast-open', - '--prerender-from-omnibox=disabled', '--enable-web-bluetooth', '--disable-features=AudioServiceOutOfProcess,IsolateOrigins,site-per-process,TranslateUI,BlinkGenPropertyTrees', - '--aggressive-cache-discard', '--disable-ipc-flooding-protection', '--disable-blink-features=AutomationControlled', '--test-type', - '--enable-features=NetworkService,NetworkServiceInProcess,TrustTokens,TrustTokensAlwaysAllowIssuance', - '--disable-breakpad', '--disable-component-update', '--disable-domain-reliability', '--disable-sync', '--disable-client-side-phishing-detection', - '--disable-hang-monitor', '--disable-popup-blocking', '--disable-prompt-on-repost', '--metrics-recording-only', 
'--safebrowsing-disable-auto-update', '--password-store=basic', - '--</s> ===========changed ref 3=========== # module: scrapling.engines.pw # offset: 1 <s>metrics-recording-only', '--safebrowsing-disable-auto-update', '--password-store=basic', - '--autoplay-policy=no-user-gesture-required', '--use-mock-keychain', '--force-webrtc-ip-handling-policy=disable_non_proxied_udp', - '--webrtc-ip-handling-policy=disable_non_proxied_udp', '--disable-session-crashed-bubble', '--disable-crash-reporter', '--disable-dev-shm-usage', '--force-color-profile=srgb', - '--disable-translate', '--disable-background-networking', '--disable-background-timer-throttling', '--disable-backgrounding-occluded-windows', '--disable-infobars', - '--hide-scrollbars', '--disable-renderer-backgrounding', '--font-render-hinting=none', '--disable-logging', '--enable-surface-synchronization', - '--run-all-compositor-stages-before-draw', '--disable-threaded-animation', '--disable-threaded-scrolling', '--disable-checker-imaging', - '--disable-new-content-rendering-timeout', '--disable-image-animation-resync', '--disable-partial-raster', - '--blink-settings=primaryHoverType=2,availableHoverTypes=2,primaryPointerType=4,availablePointerTypes=4', - '--disable-layer-tree-host-memory-pressure', - '--window-position=0,0', - '--disable-features=site-per-process', - '--disable-default-apps', - '--disable-component-extensions-with-background-pages', - '--disable-extensions', - # "--disable-reading-from-canvas", # For Firefox - '--start-maximized' # For headless check bypass - ] - ===========changed ref 4=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 8=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 9=========== + # module: scrapling.core.custom_types + + ===========changed ref 10=========== + # module: scrapling.core.mixins + + ===========changed ref 11=========== + # module: scrapling.core.translator + + ===========changed ref 12=========== + # module: scrapling.core.utils + + ===========changed ref 13=========== + # module: scrapling.core + + ===========changed ref 14=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 15=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass +
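The _cdp_url_logic record above serializes the NSTBrowser query as JSON and hands it to construct_websocket_url as a single 'config' parameter. Below is a minimal sketch of that kind of URL construction with the stdlib, assuming the helper behaves like a plain query-string encoder (its real implementation may differ); the endpoint is a placeholder. Note also that the guard `type(self.nstbrowser_config) is Dict` compares against typing.Dict, which is never the runtime type of a dict, so `isinstance(x, dict)` is the usual idiom for that check:

import json
from urllib.parse import urlencode, urlparse, urlunparse

def build_ws_url(base_url: str, query_params: dict) -> str:
    # Encode the params (including the JSON-serialized config) into the query string
    parts = urlparse(base_url)
    return urlunparse(parts._replace(query=urlencode(query_params)))

query = {'once': True, 'headless': True}   # trimmed NSTBrowser-style config
config = {'config': json.dumps(query)}     # whole config travels as one JSON parameter
print(build_ws_url('ws://127.0.0.1:8848/devtool/launch', config))  # placeholder CDP endpoint
# e.g. ws://127.0.0.1:8848/devtool/launch?config=%7B%22once%22%3A+true%2C+%22headless%22%3A+true%7D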
scrapling.engines.pw/PlaywrightEngine.fetch
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
# module: scrapling.engines.pw class PlaywrightEngine: + def fetch(self, url) -> Response: - def fetch(self, url): <0> if not self.stealth: <1> from playwright.sync_api import sync_playwright <2> else: <3> from rebrowser_playwright.sync_api import sync_playwright <4> <5> with sync_playwright() as p: <6> # Handle the UserAgent early <7> if self.useragent: <8> extra_headers = {} <9> useragent = self.useragent <10> else: <11> extra_headers = generate_headers(browser_mode=True) <12> useragent = extra_headers.get('User-Agent') <13> <14> # Prepare the flags before diving <15> flags = DEFAULT_STEALTH_FLAGS <16> if self.hide_canvas: <17> flags += ['--fingerprinting-canvas-image-data-noise'] <18> if self.disable_webgl: <19> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2'] <20> <21> # Creating the browser <22> if self.cdp_url: <23> cdp_url = self._cdp_url_logic(flags if self.stealth else None) <24> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url) <25> else: <26> if self.stealth: <27> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True) <28> else: <29> browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation']) <30> <31> # Creating the context <32> if self.stealth: <33> context = browser.new_context( <34> locale='en-US', <35> is_mobile=False, <36> has_touch=False, <37> color_scheme='dark',</s>
===========below chunk 0=========== # module: scrapling.engines.pw class PlaywrightEngine: + def fetch(self, url) -> Response: - def fetch(self, url): # offset: 1 user_agent=useragent, device_scale_factor=2, # I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now service_workers="allow", ignore_https_errors=True, extra_http_headers=extra_headers, screen={"width": 1920, "height": 1080}, viewport={"width": 1920, "height": 1080}, permissions=["geolocation", 'notifications'], ) else: context = browser.new_context( color_scheme='dark', user_agent=useragent, device_scale_factor=2, extra_http_headers=extra_headers ) # Finally we are in business page = context.new_page() page.set_default_navigation_timeout(self.timeout) page.set_default_timeout(self.timeout) if self.stealth: # Basic bypasses nothing fancy as I'm still working on it # But with adding these bypasses to the above config, it bypasses many online tests like # https://bot.sannysoft.com/ # https://kaliiiiiiiiii.github.io/brotector/ # https://pixelscan.net/ # https://iphey.com/ # https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint # https://arh.antoinevastel.com/bots/areyouheadless/ # https://prescience-data.github.io/execution-monitor.html page.add_init_script(path=js_bypass_path('webdriver_fully.js')) page.add_init_script(path=js_bypass_path('window_chrome.js')) page.add_init_script(path</s> ===========below chunk 1=========== # module: scrapling.engines.pw class PlaywrightEngine: + def fetch(self, url) -> Response: - def fetch(self, url): # offset: 2 <s>init_script(path=js_bypass_path('window_chrome.js')) page.add_init_script(path=js_bypass_path('navigator_plugins.js')) page.add_init_script(path=js_bypass_path('pdf_viewer.js')) page.add_init_script(path=js_bypass_path('notification_permission.js')) page.add_init_script(path=js_bypass_path('screen_props.js')) page.add_init_script(path=js_bypass_path('playwright_fingerprint.js')) page.goto(url, referer=generate_convincing_referer(url) if self.stealth else None) page.wait_for_load_state(state="load") page.wait_for_load_state(state="domcontentloaded") if self.network_idle: page.wait_for_load_state('networkidle') page = self.page_action(page) if self.wait_selector and type(self.wait_selector) is str: waiter = page.locator(self.wait_selector) waiter.wait_for(state=self.wait_selector_state) html = page.content() page.close() return html ===========unchanged ref 0=========== at: scrapling.engines.pw DEFAULT_STEALTH_FLAGS = [ # Explanation: https://peter.sh/experiments/chromium-command-line-switches/ # Generally this will make the browser faster and less detectable '--incognito', '--accept-lang=en-US', '--lang=en-US', '--no-pings', '--mute-audio', '--no-first-run', '--no-default-browser-check', '--disable-cloud-import', '--disable-gesture-typing', '--disable-offer-store-unmasked-wallet-cards', '--disable-offer-upload-credit-cards', '--disable-print-preview', '--disable-voice-input', '--disable-wake-on-wifi', '--disable-cookie-encryption', '--ignore-gpu-blocklist', '--enable-async-dns', '--enable-simple-cache-backend', '--enable-tcp-fast-open', '--prerender-from-omnibox=disabled', '--enable-web-bluetooth', '--disable-features=AudioServiceOutOfProcess,IsolateOrigins,site-per-process,TranslateUI,BlinkGenPropertyTrees', '--aggressive-cache-discard', '--disable-ipc-flooding-protection', '--disable-blink-features=AutomationControlled', '--test-type', 
'--enable-features=NetworkService,NetworkServiceInProcess,TrustTokens,TrustTokensAlwaysAllowIssuance', '--disable-breakpad', '--disable-component-update', '--disable-domain-reliability', '--disable-sync', '--disable-client-side-phishing-detection', '--disable-hang-monitor', '--disable-popup-blocking', '--disable-prompt-on-repost', '--metrics-recording-only', '--safebrowsing-disable-auto-update', '--password-store=basic', '--autoplay-policy=no-user-gesture-required', '--use-mock-keychain', '--force-webrtc-ip-handling-policy=disable_non_proxied_udp', '--webrtc-ip-handling-policy=disable_non_proxied_udp</s> ===========unchanged ref 1=========== at: scrapling.engines.pw.PlaywrightEngine _cdp_url_logic(self, flags: Optional[dict]=None) _cdp_url_logic(flags: Optional[dict]=None) at: scrapling.engines.pw.PlaywrightEngine.__init__ self.headless = headless self.stealth = bool(stealth) self.hide_canvas = bool(hide_canvas) self.disable_webgl = bool(disable_webgl) self.cdp_url = cdp_url self.useragent = useragent at: scrapling.engines.tools generate_convincing_referer(url) js_bypass_path(filename) generate_headers(browser_mode=False) ===========changed ref 0=========== # module: scrapling.engines.pw class PlaywrightEngine: def _cdp_url_logic(self, flags: Optional[dict] = None): cdp_url = self.cdp_url if self.nstbrowser_mode: if self.nstbrowser_config and type(self.nstbrowser_config) is Dict: config = self.nstbrowser_config else: - # Defaulting to the docker mode, token doesn't matter in it as it's passed for the container - query = { - "once": True, - "headless": True, - "autoClose": True, - "fingerprint": { - "flags": { - "timezone": "BasedOnIp", - "screen": "Custom" - }, - "platform": 'linux', # support: windows, mac, linux - "kernel": 'chromium', # only support: chromium - "kernelMilestone": '128', - "hardwareConcurrency": 8, - "deviceMemory": 8, - }, - } + query = NSTBROWSER_DEFAULT_QUERY.copy() if flags: query.update({ "args": dict(zip(flags, [''] * len(flags))), # browser args should be a dictionary }) config = { 'config': json.dumps(query), # 'token': '' } cdp_url = construct_websocket_url(cdp_url, config) return cdp_url
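The stealth branch of fetch above layers several page.add_init_script bypasses before navigating. A minimal sketch of the same mechanism with one inline script instead of Scrapling's bundled files, assuming plain sync Playwright; the patched property is just the classic navigator.webdriver tell:

from playwright.sync_api import sync_playwright

# Runs in every new document before any page script executes
STEALTH_JS = "Object.defineProperty(navigator, 'webdriver', {get: () => undefined});"

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True, ignore_default_args=['--enable-automation'])
    page = browser.new_page()
    page.add_init_script(STEALTH_JS)  # Scrapling loads its bypasses from files via path=
    page.goto('https://example.com')
    print(page.evaluate('navigator.webdriver'))  # None once the getter is patched
    browser.close()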
scrapling.engines.camo/CamoufoxEngine.__init__
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<8>:<add> self.page_action = do_nothing <del> self.page_action = _do_nothing <13>:<add> self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
<s>bool] = False, - block_images: Optional[bool] = True, block_webrtc: Optional[bool] = False, network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, + page_action: Callable = do_nothing, - page_action: Callable = _do_nothing, wait_selector: Optional[str] = None, wait_selector_state: str = 'attached', + adaptor_arguments: Dict = None ): <0> self.headless = headless <1> self.block_images = bool(block_images) <2> self.block_webrtc = bool(block_webrtc) <3> self.network_idle = bool(network_idle) <4> self.timeout = check_type_validity(timeout, [int, float], 30000) <5> if callable(page_action): <6> self.page_action = page_action <7> else: <8> self.page_action = _do_nothing <9> logging.error('[Ignored] Argument "page_action" must be callable') <10> <11> self.wait_selector = wait_selector <12> self.wait_selector_state = wait_selector_state <13>
===========unchanged ref 0=========== at: logging error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing Callable = _CallableType(collections.abc.Callable, 2) Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: scrapling.engines.camo - def _do_nothing(page): - # Anything - return page - ===========changed ref 1=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 2=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 3=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 5=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 6=========== + # module: scrapling.core.custom_types + + ===========changed ref 7=========== + # module: scrapling.core.mixins + + ===========changed ref 8=========== + # module: scrapling.core.translator + + ===========changed ref 9=========== + # module: scrapling.core.utils + + ===========changed ref 10=========== + # module: scrapling.core + + ===========changed ref 11=========== # module: scrapling.engines.pw - - ===========changed ref 12=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 13=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 14=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 19=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 20=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 21=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. 
+ """ + ===========changed ref 22=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 23=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 24=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 25=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 26=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 27=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 28=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 29=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 30=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 31=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 32=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 33=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 34=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 35=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub + ===========changed ref 36=========== + # module: scrapling.core.translator + class TranslatorMixin: + @staticmethod + def xpath_text_simple_pseudo_element(xpath: OriginalXPathExpr) -> XPathExpr: + """Support selecting text nodes using ::text pseudo-element""" + return XPathExpr.from_xpath(xpath, textnode=True) + ===========changed ref 37=========== + # module: scrapling.engines.toolbelt.navigation + def js_bypass_path(filename): + current_directory = os.path.dirname(__file__) + return os.path.join(current_directory, 'bypasses', filename) + ===========changed ref 
38=========== + # module: scrapling.core.storage_adaptors + class StorageSystemMixin(ABC): + # If you want to make your own storage system, you have to inherit from this + def __init__(self, url: Union[str, None] = None): + """ + :param url: URL of the website we are working on to separate it from other websites data + """ + self.url = url + ===========changed ref 39=========== + # module: scrapling.core.mixins + class SelectorsGeneration: + @property + def css_selector(self) -> str: + """Generate a CSS selector for the current element + :return: A string of the generated selector. + """ + return self.__general_selection() + ===========changed ref 40=========== + # module: scrapling.core.utils + def _is_iterable(s: Any): + # This will be used only in regex functions to make sure it's iterable but not string/bytes + return isinstance(s, (list, tuple,)) +
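Both engine constructors above validate page_action identically: keep it if callable, otherwise log an error and substitute do_nothing. A condensed, runnable sketch of that guard; resolve_page_action is a name introduced here for illustration, and check_type_validity, Scrapling's own validator, is not reproduced:

import logging

def do_nothing(page):
    # Filler for `page_action`: hand the page back untouched
    return page

def resolve_page_action(page_action):
    if callable(page_action):
        return page_action
    logging.error('[Ignored] Argument "page_action" must be callable')
    return do_nothing

# A non-callable argument silently degrades to the no-op
assert resolve_page_action('not a function') is do_nothing
assert resolve_page_action(do_nothing) is do_nothing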
scrapling.engines.camo/CamoufoxEngine.fetch
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<2>:<add> block_images=self.block_images, # Careful! it can make some websites like StackOverflow not finish loading at all, even in headful mode <del> block_images=self.block_images, <9>:<add> res = page.goto(url, referer=generate_convincing_referer(url)) <del> page.goto(url, referer=generate_convincing_referer(url)) <21>:<add> content_type = res.headers.get('content-type', '') <add> # Parse charset from content-type <add> encoding = 'utf-8' # default encoding <add> if 'charset=' in content_type.lower(): <add> encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip() <add> <add> response = Response( <add> url=res.url, <add> text=res.text(), <add> content=res.body(), <add> status=res.status, <add> reason=res.status_text, <add> encoding=encoding, <add> cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()}, <add>
# module: scrapling.engines.camo class CamoufoxEngine: + def fetch(self, url: str) -> Response: - def fetch(self, url: str): <0> with Camoufox( <1> headless=self.headless, <2> block_images=self.block_images, <3> os=get_os_name(), <4> block_webrtc=self.block_webrtc, <5> ) as browser: <6> page = browser.new_page() <7> page.set_default_navigation_timeout(self.timeout) <8> page.set_default_timeout(self.timeout) <9> page.goto(url, referer=generate_convincing_referer(url)) <10> page.wait_for_load_state(state="load") <11> page.wait_for_load_state(state="domcontentloaded") <12> if self.network_idle: <13> page.wait_for_load_state('networkidle') <14> <15> page = self.page_action(page) <16> <17> if self.wait_selector and type(self.wait_selector) is str: <18> waiter = page.locator(self.wait_selector) <19> waiter.wait_for(state=self.wait_selector_state) <20> <21> html = page.content() <22> page.close() <23> return html <24>
===========unchanged ref 0=========== at: scrapling.engines.camo.CamoufoxEngine.__init__ self.headless = headless self.block_images = bool(block_images) self.block_webrtc = bool(block_webrtc) self.network_idle = bool(network_idle) self.timeout = check_type_validity(timeout, [int, float], 30000) self.page_action = do_nothing self.page_action = page_action self.wait_selector = wait_selector ===========changed ref 0=========== # module: scrapling.engines.camo - def _do_nothing(page): - # Anything - return page - ===========changed ref 1=========== <s>bool] = False, - block_images: Optional[bool] = True, block_webrtc: Optional[bool] = False, network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, + page_action: Callable = do_nothing, - page_action: Callable = _do_nothing, wait_selector: Optional[str] = None, wait_selector_state: str = 'attached', + adaptor_arguments: Dict = None ): self.headless = headless self.block_images = bool(block_images) self.block_webrtc = bool(block_webrtc) self.network_idle = bool(network_idle) self.timeout = check_type_validity(timeout, [int, float], 30000) if callable(page_action): self.page_action = page_action else: + self.page_action = do_nothing - self.page_action = _do_nothing logging.error('[Ignored] Argument "page_action" must be callable') self.wait_selector = wait_selector self.wait_selector_state = wait_selector_state + self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} ===========changed ref 2=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 3=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 4=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 6=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 7=========== + # module: scrapling.core.custom_types + + ===========changed ref 8=========== + # module: scrapling.core.mixins + + ===========changed ref 9=========== + # module: scrapling.core.translator + + ===========changed ref 10=========== + # module: scrapling.core.utils + + ===========changed ref 11=========== + # module: scrapling.core + + ===========changed ref 12=========== # module: scrapling.engines.pw - - ===========changed ref 13=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 14=========== + # module: scrapling.core.translator + # e.g. 
cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 15=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 16=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 20=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 21=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 22=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 23=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 24=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 25=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 26=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 27=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 28=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 29=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 30=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 31=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page + ===========changed ref 32=========== + # module: scrapling.engines.toolbelt.custom + @dataclass(frozen=True) + class Response: + def __repr__(self): + return f'<{self.__class__.__name__} [{self.status} {self.reason}]>' + ===========changed ref 33=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def get(self, key, default=None): + """Acts 
like standard dictionary `.get()` method""" + return self._data.get(key, default) + ===========changed ref 34=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + @property + def json_string(self): + """Convert current attributes to JSON string if the attributes are JSON serializable otherwise throws error""" + return dumps(dict(self._data)) + ===========changed ref 35=========== + # module: scrapling.core.custom_types + class TextHandler(str): + def sort(self, reverse: bool = False) -> str: + """Return a sorted version of the string""" + return self.__class__("".join(sorted(self, reverse=reverse))) + ===========changed ref 36=========== + # module: scrapling.core.translator + regex = f"[{HTML5_WHITESPACE}]+" + replace_html5_whitespaces = re.compile(regex).sub +
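The encoding detection added to CamoufoxEngine.fetch above is plain string slicing over the Content-Type header. The same parse, isolated as a function so it can be exercised directly, plus a stdlib alternative for comparison (the alternative is not what Scrapling uses):

from email.message import Message

def charset_from_content_type(content_type: str, default: str = 'utf-8') -> str:
    # Mirrors the record above: take the token after 'charset=', stop at ';'
    if 'charset=' in content_type.lower():
        return content_type.lower().split('charset=')[-1].split(';')[0].strip()
    return default

assert charset_from_content_type('text/html; charset=ISO-8859-1') == 'iso-8859-1'
assert charset_from_content_type('application/json') == 'utf-8'

# email.message.Message can parse the same header parameter
msg = Message()
msg['Content-Type'] = 'text/html; charset=ISO-8859-1'
assert msg.get_content_charset() == 'iso-8859-1'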
scrapling.fetcher/Fetcher.get
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<0>:<add> response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).get(url, stealthy_headers, **kwargs) <add> return response_object <del> html_content = StaticEngine(follow_redirects, timeout).get(url, stealthy_headers, **kwargs) <1>:<del> return self.__generate_adaptor(url, html_content)
<s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: <0> html_content = StaticEngine(follow_redirects, timeout).get(url, stealthy_headers, **kwargs) <1> return self.__generate_adaptor(url, html_content) <2>
===========changed ref 0=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def fetch(self, url: str) -> Adaptor: - html_content = self.engine.fetch(url) - return self.__generate_adaptor(url, html_content) - ===========changed ref 1=========== <s>class StealthyFetcher(BaseFetcher): + def fetch( + self, url: str, headless: Union[bool, str] = True, block_images: Optional[bool] = False, block_webrtc: Optional[bool] = False, + network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, + wait_selector_state: str = 'attached', + ) -> Response: + engine = CamoufoxEngine( + timeout=timeout, + headless=headless, + page_action=page_action, + block_images=block_images, + block_webrtc=block_webrtc, + network_idle=network_idle, + wait_selector=wait_selector, + wait_selector_state=wait_selector_state, + adaptor_arguments=self.adaptor_arguments, + ) + return engine.fetch(url) + ===========changed ref 2=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def __generate_adaptor(self, url, html_content): - """To make the code less repetitive and manage return result from one function""" - return Adaptor( - text=html_content, - url=url, - encoding=self.__encoding, - huge_tree=self.__huge_tree, - keep_comments=self.__keep_comments, - auto_match=self.__auto_match, - storage=self.__storage, - storage_args=self.__storage_args, - debug=self.__debug, - ) - ===========changed ref 3=========== <s> __init__( - self, - browser_engine: Optional[object] = None, - # Adaptor class parameters - response_encoding: str = "utf8", - huge_tree: bool = True, - keep_comments: Optional[bool] = False, - auto_match: Optional[bool] = False, - storage: Any = SQLiteStorageSystem, - storage_args: Optional[Dict] = None, - debug: Optional[bool] = True, - ): - if browser_engine is not None: - self.engine = check_if_engine_usable(browser_engine) - else: - self.engine = CamoufoxEngine() - # I won't validate Adaptor's class parameters here again, I will leave it to be validated later - self.__encoding = response_encoding - self.__huge_tree = huge_tree - self.__keep_comments = keep_comments - self.__auto_match = auto_match - self.__storage = storage - self.__storage_args = storage_args - self.__debug = debug - ===========changed ref 4=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 5=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 8=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 9=========== + # module: scrapling.core.custom_types + + ===========changed ref 10=========== + # module: scrapling.core.mixins + + ===========changed ref 11=========== + # module: scrapling.core.translator + + ===========changed ref 12=========== + # module: scrapling.core.utils + + ===========changed ref 13=========== + # module: scrapling.core + + ===========changed ref 14=========== # module: scrapling.engines.pw - - ===========changed ref 15=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) 
-> str: + pass + ===========changed ref 16=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 17=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 20=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 21=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 22=========== # module: scrapling.engines.camo - def _do_nothing(page): - # Anything - return page - ===========changed ref 23=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 24=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 25=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 26=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 27=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 28=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 29=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 30=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 31=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None + ===========changed ref 32=========== + # module: scrapling.core.custom_types + class TextHandler(str): + """Extends standard Python string by adding more functionality""" + __slots__ = () + ===========changed ref 33=========== + # module: scrapling.core.storage_adaptors + @cache(None, typed=True) + class SQLiteStorageSystem(StorageSystemMixin): + def __del__(self): + """To ensure all connections are closed when the object is destroyed.""" + self.close() + ===========changed ref 34=========== + # module: scrapling.engines.toolbelt.custom + # Pew Pew + def do_nothing(page): + # Just works as a filler for `page_action` argument in browser engines + return page +
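After this refactor every HTTP verb on Fetcher has the same shape: construct a StaticEngine with the fetcher's shared adaptor_arguments, call the matching method, and return its Response. A minimal sketch of that delegation pattern; StaticEngine here is a stand-in stub, not Scrapling's real engine:

from typing import Dict, Optional

class StaticEngine:
    """Stub standing in for Scrapling's StaticEngine."""
    def __init__(self, follow_redirects: bool = True, timeout: Optional[float] = None,
                 adaptor_arguments: Optional[Dict] = None):
        self.follow_redirects = follow_redirects
        self.timeout = timeout
        self.adaptor_arguments = adaptor_arguments or {}

    def get(self, url: str, stealthy_headers: bool = True, **kwargs):
        return f'GET {url} (redirects={self.follow_redirects})'

class BaseFetcher:
    def __init__(self, adaptor_arguments: Optional[Dict] = None):
        # Parser settings forwarded to every engine this fetcher builds
        self.adaptor_arguments = adaptor_arguments or {}

class Fetcher(BaseFetcher):
    def get(self, url: str, follow_redirects: bool = True,
            timeout: Optional[float] = None, stealthy_headers: bool = True, **kwargs):
        engine = StaticEngine(follow_redirects, timeout,
                              adaptor_arguments=self.adaptor_arguments)
        return engine.get(url, stealthy_headers, **kwargs)

print(Fetcher().get('https://example.com'))  # GET https://example.com (redirects=True)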
scrapling.fetcher/Fetcher.post
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<0>:<add> response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).post(url, stealthy_headers, **kwargs) <del> html_content = StaticEngine(follow_redirects, timeout).post(url, stealthy_headers, **kwargs) <1>:<add> return response_object <del> return self.__generate_adaptor(url, html_content)
<s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: <0> html_content = StaticEngine(follow_redirects, timeout).post(url, stealthy_headers, **kwargs) <1> return self.__generate_adaptor(url, html_content) <2>
===========changed ref 0=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def fetch(self, url: str) -> Adaptor: - html_content = self.engine.fetch(url) - return self.__generate_adaptor(url, html_content) - ===========changed ref 1=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).get(url, stealthy_headers, **kwargs) + return response_object - html_content = StaticEngine(follow_redirects, timeout).get(url, stealthy_headers, **kwargs) - return self.__generate_adaptor(url, html_content) ===========changed ref 2=========== <s>class StealthyFetcher(BaseFetcher): + def fetch( + self, url: str, headless: Union[bool, str] = True, block_images: Optional[bool] = False, block_webrtc: Optional[bool] = False, + network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, + wait_selector_state: str = 'attached', + ) -> Response: + engine = CamoufoxEngine( + timeout=timeout, + headless=headless, + page_action=page_action, + block_images=block_images, + block_webrtc=block_webrtc, + network_idle=network_idle, + wait_selector=wait_selector, + wait_selector_state=wait_selector_state, + adaptor_arguments=self.adaptor_arguments, + ) + return engine.fetch(url) + ===========changed ref 3=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def __generate_adaptor(self, url, html_content): - """To make the code less repetitive and manage return result from one function""" - return Adaptor( - text=html_content, - url=url, - encoding=self.__encoding, - huge_tree=self.__huge_tree, - keep_comments=self.__keep_comments, - auto_match=self.__auto_match, - storage=self.__storage, - storage_args=self.__storage_args, - debug=self.__debug, - ) - ===========changed ref 4=========== <s> __init__( - self, - browser_engine: Optional[object] = None, - # Adaptor class parameters - response_encoding: str = "utf8", - huge_tree: bool = True, - keep_comments: Optional[bool] = False, - auto_match: Optional[bool] = False, - storage: Any = SQLiteStorageSystem, - storage_args: Optional[Dict] = None, - debug: Optional[bool] = True, - ): - if browser_engine is not None: - self.engine = check_if_engine_usable(browser_engine) - else: - self.engine = CamoufoxEngine() - # I won't validate Adaptor's class parameters here again, I will leave it to be validated later - self.__encoding = response_encoding - self.__huge_tree = huge_tree - self.__keep_comments = keep_comments - self.__auto_match = auto_match - self.__storage = storage - self.__storage_args = storage_args - self.__debug = debug - ===========changed ref 5=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 6=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 8=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 9=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 
10=========== + # module: scrapling.core.custom_types + + ===========changed ref 11=========== + # module: scrapling.core.mixins + + ===========changed ref 12=========== + # module: scrapling.core.translator + + ===========changed ref 13=========== + # module: scrapling.core.utils + + ===========changed ref 14=========== + # module: scrapling.core + + ===========changed ref 15=========== # module: scrapling.engines.pw - - ===========changed ref 16=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 17=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 18=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 20=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 21=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 22=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 23=========== # module: scrapling.engines.camo - def _do_nothing(page): - # Anything - return page - ===========changed ref 24=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 25=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 26=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 27=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 28=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) + ===========changed ref 29=========== + # module: scrapling.engines.toolbelt.fingerprints + """ + Functions related to generating headers and fingerprints generally + """ + ===========changed ref 30=========== + # module: scrapling.core.translator + class HTMLTranslator(TranslatorMixin, OriginalHTMLTranslator): + @cache(maxsize=256) + def css_to_xpath(self, css: str, prefix: str = "descendant-or-self::") -> str: + return super().css_to_xpath(css, prefix) + ===========changed ref 31=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __repr__(self): + return f"{self.__class__.__name__}({self._data})" + ===========changed ref 32=========== + # module: scrapling.core.translator + class XPathExpr(OriginalXPathExpr): + textnode: bool = False + attribute: Optional[str] = None +
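The frozen Response dataclass shown in earlier changed refs (with a hand-written __repr__) is what all these verb methods now return. A sketch of that shape with a trimmed field set; the real class carries the full set of fields listed in the CamoufoxEngine.fetch record:

from dataclasses import dataclass, field

@dataclass(frozen=True)
class Response:
    url: str
    status: int
    reason: str
    text: str = ''
    content: bytes = b''
    encoding: str = 'utf-8'
    cookies: dict = field(default_factory=dict)

    def __repr__(self):
        # dataclass keeps a __repr__ defined in the class body instead of generating one
        return f'<{self.__class__.__name__} [{self.status} {self.reason}]>'

r = Response(url='https://example.com', status=200, reason='OK')
print(r)  # <Response [200 OK]>
# r.status = 404  # would raise dataclasses.FrozenInstanceError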
scrapling.fetcher/Fetcher.put
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<0>:<add> response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).put(url, stealthy_headers, **kwargs) <add> return response_object <del> html_content = StaticEngine(follow_redirects, timeout).put(url, stealthy_headers, **kwargs) <1>:<del> return self.__generate_adaptor(url, html_content)
<s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def put(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def put(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: <0> html_content = StaticEngine(follow_redirects, timeout).put(url, stealthy_headers, **kwargs) <1> return self.__generate_adaptor(url, html_content) <2>
===========changed ref 0=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def fetch(self, url: str) -> Adaptor: - html_content = self.engine.fetch(url) - return self.__generate_adaptor(url, html_content) - ===========changed ref 1=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).post(url, stealthy_headers, **kwargs) - html_content = StaticEngine(follow_redirects, timeout).post(url, stealthy_headers, **kwargs) + return response_object - return self.__generate_adaptor(url, html_content) ===========changed ref 2=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).get(url, stealthy_headers, **kwargs) + return response_object - html_content = StaticEngine(follow_redirects, timeout).get(url, stealthy_headers, **kwargs) - return self.__generate_adaptor(url, html_content) ===========changed ref 3=========== <s>class StealthyFetcher(BaseFetcher): + def fetch( + self, url: str, headless: Union[bool, str] = True, block_images: Optional[bool] = False, block_webrtc: Optional[bool] = False, + network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, + wait_selector_state: str = 'attached', + ) -> Response: + engine = CamoufoxEngine( + timeout=timeout, + headless=headless, + page_action=page_action, + block_images=block_images, + block_webrtc=block_webrtc, + network_idle=network_idle, + wait_selector=wait_selector, + wait_selector_state=wait_selector_state, + adaptor_arguments=self.adaptor_arguments, + ) + return engine.fetch(url) + ===========changed ref 4=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def __generate_adaptor(self, url, html_content): - """To make the code less repetitive and manage return result from one function""" - return Adaptor( - text=html_content, - url=url, - encoding=self.__encoding, - huge_tree=self.__huge_tree, - keep_comments=self.__keep_comments, - auto_match=self.__auto_match, - storage=self.__storage, - storage_args=self.__storage_args, - debug=self.__debug, - ) - ===========changed ref 5=========== <s> __init__( - self, - browser_engine: Optional[object] = None, - # Adaptor class parameters - response_encoding: str = "utf8", - huge_tree: bool = True, - keep_comments: Optional[bool] = False, - auto_match: Optional[bool] = False, - storage: Any = SQLiteStorageSystem, - storage_args: Optional[Dict] = None, - debug: Optional[bool] = True, - ): - if browser_engine is not None: - self.engine = check_if_engine_usable(browser_engine) - else: - self.engine = CamoufoxEngine() - # I won't validate Adaptor's 
class parameters here again, I will leave it to be validated later - self.__encoding = response_encoding - self.__huge_tree = huge_tree - self.__keep_comments = keep_comments - self.__auto_match = auto_match - self.__storage = storage - self.__storage_args = storage_args - self.__debug = debug - ===========changed ref 6=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 7=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 8=========== + # module: scrapling.engines.toolbelt.custom + + ===========changed ref 9=========== + # module: scrapling.engines.toolbelt.fingerprints + + ===========changed ref 10=========== + # module: scrapling.core.storage_adaptors + + ===========changed ref 11=========== + # module: scrapling.core.custom_types + + ===========changed ref 12=========== + # module: scrapling.core.mixins + + ===========changed ref 13=========== + # module: scrapling.core.translator + + ===========changed ref 14=========== + # module: scrapling.core.utils + + ===========changed ref 15=========== + # module: scrapling.core + + ===========changed ref 16=========== # module: scrapling.engines.pw - - ===========changed ref 17=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def css_to_xpath(self, css: str, prefix: str = ...) -> str: + pass + ===========changed ref 18=========== + # module: scrapling.core.translator + # e.g. cssselect.GenericTranslator, cssselect.HTMLTranslator + class TranslatorProtocol(Protocol): + def xpath_element(self, selector: Element) -> OriginalXPathExpr: + pass + ===========changed ref 19=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __contains__(self, key): + return key in self._data + ===========changed ref 20=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __str__(self): + return str(self._data) + ===========changed ref 21=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __len__(self): + return len(self._data) + ===========changed ref 22=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __iter__(self): + return iter(self._data) + ===========changed ref 23=========== + # module: scrapling.core.custom_types + class AttributesHandler(Mapping): + def __getitem__(self, key): + return self._data[key] + ===========changed ref 24=========== # module: scrapling.engines.camo - def _do_nothing(page): - # Anything - return page - ===========changed ref 25=========== # module: scrapling.engines.pw - def _do_nothing(page): - # Anything - return page - ===========changed ref 26=========== + # module: scrapling.engines.toolbelt.navigation + """ + Functions related to files and URLs + """ + ===========changed ref 27=========== + # module: scrapling.core._types + """ + Type definitions for type checking purposes. + """ + ===========changed ref 28=========== + # module: scrapling.engines.toolbelt.custom + """ + Functions related to custom types or type checking + """ + ===========changed ref 29=========== + # module: scrapling.core.utils + def flatten(lst: Iterable): + return list(chain.from_iterable(lst)) +
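The StealthyFetcher.fetch signature shown in these changed refs maps its keyword arguments one-to-one onto a CamoufoxEngine and returns the engine's Response. Assuming the API reads as listed there and that BaseFetcher constructs with defaults, usage would look roughly like this; the URL, selector, and scroll action are placeholders:

from scrapling.fetcher import StealthyFetcher

def scroll_down(page):
    # page_action receives the live page and must return it
    page.mouse.wheel(0, 1000)
    return page

fetcher = StealthyFetcher()
response = fetcher.fetch(
    'https://example.com',   # placeholder URL
    headless=True,
    block_webrtc=True,
    network_idle=True,
    page_action=scroll_down,
    wait_selector='h1',      # wait until an <h1> is attached
)
print(response.status, response.reason)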
scrapling.fetcher/Fetcher.delete
Modified
D4Vinci~Scrapling
145c03daffb8b7b3b2d25e78ee7d03f2e9e8d123
Big structure changes (check commit description)
<0>:<add> response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).delete(url, stealthy_headers, **kwargs) <del> html_content = StaticEngine(follow_redirects, timeout).delete(url, stealthy_headers, **kwargs) <1>:<add> return response_object <del> return self.__generate_adaptor(url, html_content)
<s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def delete(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def delete(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: <0> html_content = StaticEngine(follow_redirects, timeout).delete(url, stealthy_headers, **kwargs) <1> return self.__generate_adaptor(url, html_content) <2>
===========unchanged ref 0=========== at: scrapling.engines.pw PlaywrightEngine(headless: Union[bool, str]=True, disable_resources: Optional[List]=None, useragent: Optional[str]=None, network_idle: Optional[bool]=False, timeout: Optional[float]=30000, page_action: Callable=_do_nothing, wait_selector: Optional[str]=None, wait_selector_state: Optional[str]='attached', stealth: bool=False, hide_canvas: bool=True, disable_webgl: bool=False, cdp_url: Optional[str]=None, nstbrowser_mode: bool=False, nstbrowser_config: Optional[Dict]=None) ===========changed ref 0=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def fetch(self, url: str) -> Adaptor: - html_content = self.engine.fetch(url) - return self.__generate_adaptor(url, html_content) - ===========changed ref 1=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def put(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def put(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).put(url, stealthy_headers, **kwargs) + return response_object - html_content = StaticEngine(follow_redirects, timeout).put(url, stealthy_headers, **kwargs) - return self.__generate_adaptor(url, html_content) ===========changed ref 2=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def post(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).post(url, stealthy_headers, **kwargs) - html_content = StaticEngine(follow_redirects, timeout).post(url, stealthy_headers, **kwargs) + return response_object - return self.__generate_adaptor(url, html_content) ===========changed ref 3=========== <s>fetcher + class Fetcher(BaseFetcher): - class Fetcher: + def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response: - def get(self, url: str, follow_redirects: bool = True, timeout: Optional[Union[int, float]] = None, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Adaptor: + response_object = StaticEngine(follow_redirects, timeout, adaptor_arguments=self.adaptor_arguments).get(url, stealthy_headers, **kwargs) + return response_object - html_content = StaticEngine(follow_redirects, timeout).get(url, stealthy_headers, **kwargs) - return self.__generate_adaptor(url, html_content) ===========changed ref 4=========== <s>class StealthyFetcher(BaseFetcher): + def fetch( + self, url: str, headless: Union[bool, str] = True, block_images: Optional[bool] = False, block_webrtc: Optional[bool] = False, + network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, + wait_selector_state: str = 'attached', + ) -> Response: + engine = CamoufoxEngine( + timeout=timeout, + headless=headless, + page_action=page_action, + 
block_images=block_images, + block_webrtc=block_webrtc, + network_idle=network_idle, + wait_selector=wait_selector, + wait_selector_state=wait_selector_state, + adaptor_arguments=self.adaptor_arguments, + ) + return engine.fetch(url) + ===========changed ref 5=========== # module: scrapling.fetcher + class Fetcher(BaseFetcher): - class Fetcher: - def __generate_adaptor(self, url, html_content): - """To make the code less repetitive and manage return result from one function""" - return Adaptor( - text=html_content, - url=url, - encoding=self.__encoding, - huge_tree=self.__huge_tree, - keep_comments=self.__keep_comments, - auto_match=self.__auto_match, - storage=self.__storage, - storage_args=self.__storage_args, - debug=self.__debug, - ) - ===========changed ref 6=========== <s> __init__( - self, - browser_engine: Optional[object] = None, - # Adaptor class parameters - response_encoding: str = "utf8", - huge_tree: bool = True, - keep_comments: Optional[bool] = False, - auto_match: Optional[bool] = False, - storage: Any = SQLiteStorageSystem, - storage_args: Optional[Dict] = None, - debug: Optional[bool] = True, - ): - if browser_engine is not None: - self.engine = check_if_engine_usable(browser_engine) - else: - self.engine = CamoufoxEngine() - # I won't validate Adaptor's class parameters here again, I will leave it to be validated later - self.__encoding = response_encoding - self.__huge_tree = huge_tree - self.__keep_comments = keep_comments - self.__auto_match = auto_match - self.__storage = storage - self.__storage_args = storage_args - self.__debug = debug - ===========changed ref 7=========== <s> page_action: Callable = do_nothing, + wait_selector: Optional[str] = None, + wait_selector_state: Optional[str] = 'attached', + stealth: bool = False, + hide_canvas: bool = True, + disable_webgl: bool = False, + cdp_url: Optional[str] = None, + nstbrowser_mode: bool = False, + nstbrowser_config: Optional[Dict] = None, + ) -> Response: + engine = PlaywrightEngine( + timeout=timeout, + stealth=stealth, + cdp_url=cdp_url, + headless=headless, + useragent=useragent, + page_action=page_action, + hide_canvas=hide_canvas, + network_idle=network_idle, + wait_selector=wait_selector, + disable_webgl=disable_webgl, + nstbrowser_mode=nstbrowser_mode, + nstbrowser_config=nstbrowser_config, + disable_resources=disable_resources, + wait_selector_state=wait_selector_state, + adaptor_arguments=self.adaptor_arguments, + ) + return engine.fetch(url) + ===========changed ref 8=========== + # module: scrapling.engines.toolbelt + + ===========changed ref 9=========== + # module: scrapling.engines.toolbelt.navigation + + ===========changed ref 10=========== + # module: scrapling.engines.toolbelt.custom + +
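A minimal usage sketch of the reworked HTTP-verb methods, assuming Fetcher is importable from scrapling.fetcher and using a placeholder endpoint; after this commit each verb builds a StaticEngine with the fetcher's adaptor_arguments and returns the engine's Response directly, instead of wrapping the returned HTML in an Adaptor by hand:

    from scrapling.fetcher import Fetcher

    fetcher = Fetcher()
    # delete() now delegates both the request and the response construction
    # to StaticEngine, so status, reason and headers arrive on the Response.
    resp = fetcher.delete('https://example.com/items/1', timeout=10)  # placeholder URL
    print(resp.status, resp.reason)
    print(resp.headers.get('content-type'))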
scrapling.parser/Adaptor.json
Modified
D4Vinci~Scrapling
942aa08c8502c1eaa8c611f349e9db3f6bfb9baf
Handling JSON responses better
<1>:<add> if self.text: <add> return self.text.json() <del> return self.text.json() <2>:<add> else: <add> return self.get_all_text(strip=True).json()
# module: scrapling.parser class Adaptor(SelectorsGeneration): # Operations on text functions def json(self) -> Dict: <0> """Return the JSON response if the response body is valid JSON; otherwise raise an error""" <1> return self.text.json() <2>
===========unchanged ref 0=========== at: scrapling.parser.Adaptor __slots__ = ( 'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug', '__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag', ) body = html_content at: typing Dict = _alias(dict, 2, inst=False, name='Dict')
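A short sketch of the new fallback, assuming an Adaptor can be built directly from a raw body (as the removed __generate_adaptor helper did) and that the text handler exposes the .json() method this method relies on; the URL and arguments are placeholders:

    from scrapling.parser import Adaptor

    page = Adaptor(text='{"status": "ok"}', url='https://example.com/api')  # placeholder args
    # Depending on how lxml roots the fragment, the body may sit on the root
    # element's own text or only inside a descendant; json() now covers both
    # cases: it uses self.text when present and otherwise deserializes
    # get_all_text(strip=True).
    print(page.json())  # {'status': 'ok'}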
scrapling.engines.static/StaticEngine._prepare_response
Modified
D4Vinci~Scrapling
afe53332b8e351c7f290839d5b725f78c29456a6
Convert the request headers to dictionary to unify response object shape across engines
<9>:<add> request_headers=dict(response.request.headers), <del> request_headers=response.request.headers,
# module: scrapling.engines.static class StaticEngine: def _prepare_response(self, response: httpxResponse): <0> return Response( <1> url=str(response.url), <2> text=response.text, <3> content=response.content, <4> status=response.status_code, <5> reason=response.reason_phrase, <6> encoding=response.encoding or 'utf-8', <7> cookies=dict(response.cookies), <8> headers=dict(response.headers), <9> request_headers=response.request.headers, <10> adaptor_arguments=self.adaptor_arguments <11> ) <12>
===========unchanged ref 0=========== at: httpx._models Response(status_code: int, *, headers: HeaderTypes | None=None, content: ResponseContent | None=None, text: str | None=None, html: str | None=None, json: typing.Any=None, stream: SyncByteStream | AsyncByteStream | None=None, request: Request | None=None, extensions: ResponseExtensions | None=None, history: list[Response] | None=None, default_encoding: str | typing.Callable[[bytes], str]="utf-8") at: httpx._models.Request.__init__ self.headers = Headers(headers) at: httpx._models.Request._prepare self.headers = Headers(auto_headers + self.headers.raw) at: httpx._models.Response request(self, value: Request) -> None NoneType() encoding(self, value: str) -> None at: httpx._models.Response.__init__ self.status_code = status_code self.headers = Headers(headers) at: scrapling.engines.static.StaticEngine.__init__ self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {} at: scrapling.engines.toolbelt.custom Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict)) at: scrapling.engines.toolbelt.custom.Response url: str text: str content: bytes status: int reason: str encoding: str = 'utf-8' # default encoding cookies: Dict = field(default_factory=dict) headers: Dict = field(default_factory=dict) request_headers: Dict = field(default_factory=dict) ===========unchanged ref 1=========== adaptor_arguments: Dict = field(default_factory=dict)
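A small sketch of the normalization this commit performs, using httpx directly; nothing here depends on scrapling beyond the motivation of keeping Response.request_headers one shape across engines:

    import httpx

    # httpx keeps request headers in a case-insensitive Headers object,
    # while the browser engines already populate request_headers with
    # plain dicts; casting with dict() makes the two paths identical.
    request = httpx.Request('GET', 'https://example.com',
                            headers={'User-Agent': 'scrapling'})
    print(type(request.headers).__name__)  # Headers
    print(dict(request.headers))           # plain dict, uniform across engines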