Dataset columns:
  path            string, length 9 to 117
  type            string, 2 classes
  project         string, 10 classes
  commit_hash     string, length 40
  commit_message  string, length 1 to 137
  ground_truth    string, length 0 to 2.74k
  main_code       string, length 102 to 3.37k
  context         string, length 0 to 14.7k
app.backend.app/ensure_openai_token
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<2>:<add> openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default") <del> openai_token = current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default")
# module: app.backend.app @bp.before_request + async def ensure_openai_token(): - def ensure_openai_token(): <0> openai_token = current_app.config[CONFIG_OPENAI_TOKEN] <1> if openai_token.expires_on < time.time() + 60: <2> openai_token = current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default") <3> current_app.config[CONFIG_OPENAI_TOKEN] = openai_token <4> openai.api_key = openai_token.token <5>
===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" bp = Blueprint("routes", __name__, static_folder='static') at: openai api_key = os.environ.get("OPENAI_API_KEY") at: time time() -> float ===========changed ref 0=========== # module: app.backend.app @bp.route("/assets/<path:path>") + async def assets(path): - def assets(path): + return await send_from_directory("static/assets", path) - return send_from_directory("static/assets", path) ===========changed ref 1=========== # module: app.backend.app @bp.route("/favicon.ico") + async def favicon(): - def favicon(): + return await bp.send_static_file("favicon.ico") - return bp.send_static_file("favicon.ico") ===========changed ref 2=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 3=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) + async def chat(): - def chat(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 + request_json = await request.get_json() + approach = request_json["approach"] - approach = request.json["approach"] try: impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 + r = await impl.run(request_json["history"], request_json.get("overrides") or {}) - r = impl.run(request.json["history"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500 ===========changed ref 4=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) + async def ask(): - def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 + request_json = await request.get_json() + approach = request_json["approach"] - approach = request.json["approach"] try: impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 + r = await impl.run(request_json["question"], request_json.get("overrides") or {}) - r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 5=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. 
@bp.route("/content/<path>") + async def content_file(path): - def content_file(path): blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) + blob = await blob_container.get_blob_client(path).download_blob() - blob = blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() + await blob.readinto(blob_file) - blob.readinto(blob_file) blob_file.seek(0) + return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) - return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) ===========changed ref 6=========== + # module: app.backend.approaches + + ===========changed ref 7=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 8=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 9=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 10=========== + # module: app.backend.main + app = create_app() + ===========changed ref 11=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 13=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 14=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 15=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 16=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client()
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> app = Quart(__name__) <del> app = Flask(__name__) <1>:<del> <2>:<del> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <3>:<del> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <4>:<del> # keys for each service <5>:<del> # If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <6>:<del> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) <7>:<del> <8>:<del> # Set up clients for Cognitive Search and Storage <9>:<del> search_client = SearchClient( <10>:<del> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", <11>:<del> index_name=AZURE_SEARCH_INDEX, <12>:<del> credential=azure_credential) <13>:<del> blob_client = BlobServiceClient( <14>:<del> account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", <15>:<del> credential=azure_credential) <16>:<del> <17>:<del> # Used by the OpenAI SDK <18>:<del> openai.api_type = "azure" <19>:<del> openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" <20>:<del> openai.api_version = "2023-05-15" <21>:<del> <22>:<del> # Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead <23>:<del> openai.api_type = "azure_ad" <24>:<del> openai_token = azure_credential.get_token( <25>:<del> "https://cognitiveservices.azure.com/.default" <26>:<del> ) <27>:<del> openai.api_key = openai_token.token <28>:<del> <29>:<del> # Store on app.config for later use inside requests <30>:<del> app.config[CONFIG_OPENAI_TOKEN] = openai_token <31>:<del> app.config[CONFIG_CREDENTIAL] = azure_credential
# module: app.backend.app def create_app(): <0> app = Flask(__name__) <1> <2> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <3> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <4> # keys for each service <5> # If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <6> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) <7> <8> # Set up clients for Cognitive Search and Storage <9> search_client = SearchClient( <10> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", <11> index_name=AZURE_SEARCH_INDEX, <12> credential=azure_credential) <13> blob_client = BlobServiceClient( <14> account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", <15> credential=azure_credential) <16> <17> # Used by the OpenAI SDK <18> openai.api_type = "azure" <19> openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" <20> openai.api_version = "2023-05-15" <21> <22> # Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead <23> openai.api_type = "azure_ad" <24> openai_token = azure_credential.get_token( <25> "https://cognitiveservices.azure.com/.default" <26> ) <27> openai.api_key = openai_token.token <28> <29> # Store on app.config for later use inside requests <30> app.config[CONFIG_OPENAI_TOKEN] = openai_token <31> app.config[CONFIG_CREDENTIAL] = azure_credential </s>
===========below chunk 0=========== # module: app.backend.app def create_app(): # offset: 1 # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rrr": ReadRetrieveReadApproach( search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ) } app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } app.register_blueprint(bp) return app ===========unchanged ref 0=========== at: app.backend.app AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") AZURE_OPENAI_GPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_GPT_DEPLOYMENT", "davinci") AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CLIENT = "blob_client" bp = Blueprint("routes", __name__, static_folder='static') at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) ===========unchanged ref 1=========== at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) ===========changed ref 0=========== # module: app.backend.app @bp.route("/assets/<path:path>") + async def assets(path): - def assets(path): + return await send_from_directory("static/assets", path) - return send_from_directory("static/assets", path) ===========changed ref 1=========== # module: app.backend.app 
@bp.route("/favicon.ico") + async def favicon(): - def favicon(): + return await bp.send_static_file("favicon.ico") - return bp.send_static_file("favicon.ico") ===========changed ref 2=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 3=========== # module: app.backend.app @bp.before_request + async def ensure_openai_token(): - def ensure_openai_token(): openai_token = current_app.config[CONFIG_OPENAI_TOKEN] if openai_token.expires_on < time.time() + 60: + openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default") - openai_token = current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default") current_app.config[CONFIG_OPENAI_TOKEN] = openai_token openai.api_key = openai_token.token ===========changed ref 4=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) + async def chat(): - def chat(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 + request_json = await request.get_json() + approach = request_json["approach"] - approach = request.json["approach"] try: impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 + r = await impl.run(request_json["history"], request_json.get("overrides") or {}) - r = impl.run(request.json["history"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.search
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<9>:<add> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <del> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <15>:<add> query_text = "" <del> query_text = None <18>:<add> r = await self.search_client.search(query_text, <del> r = self.search_client.search(query_text,
# module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> str: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> if not has_text: <15> query_text = None <16> <17> if overrides.get("semantic_ranker") and has_text: <18> r = self.search_client.search(query_text, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26> vector=query_vector, <27> top_k=50 if query_vector else None, <28> vector</s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> str: # offset: 1 else: r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r] else: self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r] return "\n".join(self.results) ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 2=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... 
+ ===========changed ref 3=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 4=========== + # module: app.backend.main + app = create_app() + ===========changed ref 5=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 6=========== # module: app.backend.app - if __name__ == "__main__": - app = create_app() - app.run() - ===========changed ref 7=========== # module: app.backend.app @bp.route("/assets/<path:path>") + async def assets(path): - def assets(path): + return await send_from_directory("static/assets", path) - return send_from_directory("static/assets", path) ===========changed ref 8=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 9=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 10=========== # module: app.backend.app @bp.route("/favicon.ico") + async def favicon(): - def favicon(): + return await bp.send_static_file("favicon.ico") - return bp.send_static_file("favicon.ico") ===========changed ref 11=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 13=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 14=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 15=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 16=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert 
response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 17=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" + timeout = 600 num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 - threads = 1 if num_cpus == 1 else 2 - timeout = 600 - worker_class = "gthread" + worker_class = "uvicorn.workers.UvicornWorker"
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.lookup
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> r = await self.search_client.search(q, <del> r = self.search_client.search(q, <10>:<add> answers = await r.get_answers() <del> answers = r.get_answers() <13>:<add> if await r.get_count() > 0: <del> if r.get_count() > 0: <14>:<add> return "\n".join([d['content'] async for d in r]) <del> return "\n".join(d['content'] for d in r)
# module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def lookup(self, q: str) -> Optional[str]: - def lookup(self, q: str) -> Optional[str]: <0> r = self.search_client.search(q, <1> top = 1, <2> include_total_count=True, <3> query_type=QueryType.SEMANTIC, <4> query_language="en-us", <5> query_speller="lexicon", <6> semantic_configuration_name="default", <7> query_answer="extractive|count-1", <8> query_caption="extractive|highlight-false") <9> <10> answers = r.get_answers() <11> if answers and len(answers) > 0: <12> return answers[0].text <13> if r.get_count() > 0: <14> return "\n".join(d['content'] for d in r) <15> return None <16>
===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> str: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None if overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-</s> ===========changed ref 1=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> str: # offset: 1 <s> semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: + results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r] else: + results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r] + return results, "\n".join(results) ===========changed ref 2=========== + # module: app.backend.approaches + + ===========changed ref 3=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... 
+ ===========changed ref 5=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 6=========== + # module: app.backend.main + app = create_app() + ===========changed ref 7=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 8=========== # module: app.backend.app - if __name__ == "__main__": - app = create_app() - app.run() - ===========changed ref 9=========== # module: app.backend.app @bp.route("/assets/<path:path>") + async def assets(path): - def assets(path): + return await send_from_directory("static/assets", path) - return send_from_directory("static/assets", path) ===========changed ref 10=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 11=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 12=========== # module: app.backend.app @bp.route("/favicon.ico") + async def favicon(): - def favicon(): + return await bp.send_static_file("favicon.ico") - return bp.send_static_file("favicon.ico") ===========changed ref 13=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 14=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 15=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 16=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client()
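The lookup diff above moves to the async SearchClient, so the search call, get_answers, get_count, and row iteration are all awaited. A minimal consumption sketch follows; the endpoint, index name, and query are placeholders, not values from the commit.
===========illustrative sketch 1===========
# Sketch of consuming results from the async Azure Cognitive Search client.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.search.documents.aio import SearchClient

async def lookup(question: str):
    credential = DefaultAzureCredential()
    client = SearchClient(
        endpoint="https://myservice.search.windows.net",  # placeholder endpoint
        index_name="myindex",                             # placeholder index
        credential=credential)
    # The async client's search() is a coroutine returning an async paged iterator.
    results = await client.search(question, top=1, include_total_count=True)
    count = await results.get_count()
    docs = [doc["content"] async for doc in results]
    await client.close()
    await credential.close()
    return count, docs

# asyncio.run(lookup("example question"))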
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.run
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<del> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1>:<add> search_results = None <del> self.results = None <2>:<add> async def search_and_store(q: str) -> Any: <add> nonlocal search_results <add> search_results, content = await self.search(q, overrides) <add> return content <9>:<add> Tool(name="Search", func=lambda _: 'Not implemented', coroutine=search_and_store, description="useful for when you need to ask with search", callbacks=cb_manager), <del> Tool(name="Search", func=lambda q: self.search(q, overrides), description="useful for when you need to ask with search", callbacks=cb_manager), <10>:<add> Tool(name="Lookup", func=lambda _: 'Not implemented', coroutine=self.lookup, description="useful for when you need to ask with lookup", callbacks=cb_manager) <del> Tool(name="Lookup", func=self.lookup, description="useful for when you need to ask with lookup", callbacks=cb_manager) <13>:<del> # Like results above, not great to keep this as a global, will interfere with interleaving <14>:<del> global prompt <19>:<add> class ReAct(ReActDocstoreAgent): <add> @classmethod <add> def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: <add> return prompt <add> <21>:<add> result = await chain.arun(q) <del> result = chain.run(q)
# module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1> self.results = None <2> <3> # Use to capture thought process during iterations <4> cb_handler = HtmlCallbackHandler() <5> cb_manager = CallbackManager(handlers=[cb_handler]) <6> <7> llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) <8> tools = [ <9> Tool(name="Search", func=lambda q: self.search(q, overrides), description="useful for when you need to ask with search", callbacks=cb_manager), <10> Tool(name="Lookup", func=self.lookup, description="useful for when you need to ask with lookup", callbacks=cb_manager) <11> ] <12> <13> # Like results above, not great to keep this as a global, will interfere with interleaving <14> global prompt <15> prompt_prefix = overrides.get("prompt_template") <16> prompt = PromptTemplate.from_examples( <17> EXAMPLES, SUFFIX, ["input", "agent_scratchpad"], prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX) <18> <19> agent = ReAct.from_llm_and_tools(llm, tools) <20> chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager) <21> result = chain.run(q) <22> <23> # Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links, match them with a regex to avoid <24> #</s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result) return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask EXAMPLES = [ """Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? Thought: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area. Action: Search[Colorado orogeny] Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas. Thought: It does not mention the eastern sector. So I need to look up eastern sector. Action: Lookup[eastern sector] Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny. Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range. Action: Search[High Plains] Observation: <some_file.pdf> High Plains refers to one of two distinct land regions Thought: I need to instead search High Plains (United States). Action: Search[High Plains (United States)] Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m). Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. Action: Finish[1,800 to 7,000 ft <filea.pdf>]""", """Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who</s> ===========unchanged ref 1=========== SUFFIX = """\nQuestion: {input} {agent_scratchpad}""" PREFIX = "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " \ "Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers." \ "All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. 
" at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def lookup(self, q: str) -> Optional[str]: - def lookup(self, q: str) -> Optional[str]: + r = await self.search_client.search(q, - r = self.search_client.search(q, top = 1, include_total_count=True, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", query_answer="extractive|count-1", query_caption="extractive|highlight-false") + answers = await r.get_answers() - answers = r.get_answers() if answers and len(answers) > 0: return answers[0].text + if await r.get_count() > 0: - if r.get_count() > 0: + return "\n".join([d['content'] async for d in r]) - return "\n".join(d['content'] for d in r) return None ===========changed ref 1=========== # module: app.backend.approaches.readdecomposeask + class ReadDecomposeAsk(AskApproach): - class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> str: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None if overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-</s>
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
9ee22bf60904780d271b4e1e49645e10f84e77a9
Add application insights for request and backend tracing of performance data (#515)
<0>:<add> if APPLICATIONINSIGHTS_CONNECTION_STRING: <add> configure_azure_monitor() <add> AioHttpClientInstrumentor().instrument() <2>:<add> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) <add>
# module: app.backend.app def create_app(): <0> app = Quart(__name__) <1> app.register_blueprint(bp) <2> return app <3>
===========changed ref 0=========== # module: app.backend.app # Replace these with your own values, either in environment variables or directly here AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content") AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") AZURE_OPENAI_GPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_GPT_DEPLOYMENT", "davinci") AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_CATEGORY = os.getenv("KB_FIELDS_CATEGORY", "category") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CLIENT = "blob_client" + APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") bp = Blueprint("routes", __name__, static_folder='static')
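The create_app diff above calls configure_azure_monitor, AioHttpClientInstrumentor, and OpenTelemetryMiddleware without showing their imports. The sketch below shows how those pieces are typically wired for an ASGI app; the package choices (azure-monitor-opentelemetry plus the aiohttp-client and ASGI OpenTelemetry instrumentations) are an assumption about the commit's dependencies.
===========illustrative sketch 2===========
# Sketch of Application Insights wiring for a Quart (ASGI) app, mirroring the diff above.
# Assumes azure-monitor-opentelemetry, opentelemetry-instrumentation-aiohttp-client and
# opentelemetry-instrumentation-asgi are installed.
import os

from azure.monitor.opentelemetry import configure_azure_monitor
from opentelemetry.instrumentation.aiohttp_client import AioHttpClientInstrumentor
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
from quart import Quart

def create_app():
    if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"):
        # Exports telemetry to Application Insights and traces outgoing aiohttp requests.
        configure_azure_monitor()
        AioHttpClientInstrumentor().instrument()
    app = Quart(__name__)
    # Wrap the ASGI callable so incoming HTTP requests become server spans.
    app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)
    return app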
scripts.prepdocs/compute_embedding
Modified
Azure-Samples~azure-search-openai-demo
cf3a348ec05bef13647b4c89a02dfd4a06a7fc2d
Address recent bugs (#529)
<0>:<add> refresh_openai_token()
# module: scripts.prepdocs @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding(text): <0> return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] <1>
===========unchanged ref 0=========== at: scripts.prepdocs.create_sections section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } section["embedding"] = compute_embedding(content) ===========changed ref 0=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 + open_ai_token_cache = {} + CACHE_KEY_TOKEN_CRED = 'openai_token_cred' + CACHE_KEY_CREATED_TIME = 'created_time' + CACHE_KEY_TOKEN_TYPE = 'token_type' +
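compute_embedding now calls refresh_openai_token() before each request, and the context adds a module-level token cache, but the helper's body is not included in this row. A plausible sketch is below, under the assumption that only Azure AD tokens are refreshed and with a five-minute window chosen purely for illustration.
===========illustrative sketch 3===========
# Hypothetical body for the refresh_openai_token() helper referenced above; the refresh
# window and the sync DefaultAzureCredential stored in the cache are assumptions.
import time

import openai
from azure.identity import DefaultAzureCredential

open_ai_token_cache = {}
CACHE_KEY_TOKEN_CRED = 'openai_token_cred'
CACHE_KEY_CREATED_TIME = 'created_time'
CACHE_KEY_TOKEN_TYPE = 'token_type'

def refresh_openai_token():
    # Key-based auth never expires; only Azure AD tokens need refreshing.
    if open_ai_token_cache.get(CACHE_KEY_TOKEN_TYPE) != "azure_ad":
        return
    if open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time():
        credential: DefaultAzureCredential = open_ai_token_cache[CACHE_KEY_TOKEN_CRED]
        token = credential.get_token("https://cognitiveservices.azure.com/.default")
        open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
        openai.api_key = token.token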
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
cf3a348ec05bef13647b4c89a02dfd4a06a7fc2d
Address recent bugs (#529)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(ChatApproach): def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> user_q = 'Generate search query for: ' + history[-1]["user"] <8> <9> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <10> messages = self.get_messages_from_history( <11> self.query_prompt_template, <12> self.chatgpt_model, <13> history, <14> user_q, <15> self.query_prompt_few_shots, <16> self.chatgpt_token_limit - len(user_q) <17> ) <18> <19> chat_completion = await openai.ChatCompletion.acreate( <20> deployment_id=self.chatgpt_deployment, <21> model=self.chatgpt_model, <22> messages=messages, <23> temperature=0.0, <24> max_tokens=32, <25> n=1) <26> <27> query_text = chat_completion.choices[0].message.content <28> if query_text.strip() == "0": <29> query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query <30> <31> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <32> <33> # If retrieval mode includes vectors, compute an embedding for the</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(ChatApproach): def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 if has_vector: query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: r = await self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) async for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field])</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(ChatApproach): def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s> results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_override") if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history( system_message + "\n\nSources:\n" + content, self.chatgpt_model, history, history[-1]["user"], max_tokens=self.chatgpt_token_limit) chat_completion = await openai.ChatCompletion.acreate( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or</s> ===========below chunk 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(ChatApproach): def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 3 <s>7, max_tokens=1024, n=1) chat_content = chat_completion.choices[0].message.content msg_to_display = '\n\n'.join([str(message) for message in messages]) return {"data_points": results, "answer": chat_content, "thoughts": f"Searched 
for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
2b077d857f56c983bd7eb1ce8248daf263eceb89
Add workaround for connection reset issue (#525)
<8>:<add> # Workaround for: https://github.com/openai/openai-python/issues/371 <add> async with aiohttp.ClientSession() as s: <add> openai.aiosession.set(s) <add> r = await impl.run(request_json["question"], request_json.get("overrides") or {}) <del> r = await impl.run(request_json["question"], request_json.get("overrides") or {})
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> r = await impl.run(request_json["question"], request_json.get("overrides") or {}) <9> return jsonify(r) <10> except Exception as e: <11> logging.exception("Exception in /ask") <12> return jsonify({"error": str(e)}), 500 <13>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACHES = "ask_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections.
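The /ask diff above scopes openai's async HTTP traffic to a fresh aiohttp session per request as a workaround for openai/openai-python#371. The same pattern reduced to a single call is sketched below, assuming the legacy 0.x openai SDK where aiosession is a ContextVar; the deployment name is a placeholder.
===========illustrative sketch 4===========
# Sketch of the per-call aiohttp session workaround; assumes the pre-1.0 openai SDK.
import aiohttp
import openai

async def embed(text: str):
    # A fresh session is opened for the call and closed by the context manager,
    # so no idle connection is left around for the server to reset.
    async with aiohttp.ClientSession() as session:
        openai.aiosession.set(session)
        # "embedding" stands in for whatever Azure OpenAI deployment is configured.
        response = await openai.Embedding.acreate(engine="embedding", input=text)
        return response["data"][0]["embedding"]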
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
2b077d857f56c983bd7eb1ce8248daf263eceb89
Add workaround for connection reset issue (#525)
<8>:<add> # Workaround for: https://github.com/openai/openai-python/issues/371 <add> async with aiohttp.ClientSession() as s: <add> openai.aiosession.set(s) <add> r = await impl.run(request_json["history"], request_json.get("overrides") or {}) <del> r = await impl.run(request_json["history"], request_json.get("overrides") or {})
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> r = await impl.run(request_json["history"], request_json.get("overrides") or {}) <9> return jsonify(r) <10> except Exception as e: <11> logging.exception("Exception in /chat") <12> return jsonify({"error": str(e)}), 500 <13>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACHES = "chat_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() approach = request_json["approach"] try: impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 + # Workaround for: https://github.com/openai/openai-python/issues/371 + async with aiohttp.ClientSession() as s: + openai.aiosession.set(s) + r = await impl.run(request_json["question"], request_json.get("overrides") or {}) - r = await impl.run(request_json["question"], request_json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
fb3aa0ab9499b8d79a5292ded2bc7d2ea718edab
Remove unneeded deployment (#540)
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <1> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <2> # keys for each service <3> # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <4> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) <5> <6> # Set up clients for Cognitive Search and Storage <7> search_client = SearchClient( <8> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", <9> index_name=AZURE_SEARCH_INDEX, <10> credential=azure_credential) <11> blob_client = BlobServiceClient( <12> account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", <13> credential=azure_credential) <14> <15> # Used by the OpenAI SDK <16> openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" <17> openai.api_version = "2023-05-15" <18> openai.api_type = "azure_ad" <19> openai_token = await azure_credential.get_token( <20> "https://cognitiveservices.azure.com/.default" <21> ) <22> openai.api_key = openai_token.token <23> <24> # Store on app.config for later use inside requests <25> current_app.config[CONFIG_OPENAI_TOKEN] = openai_token <26> current_app.config[CONFIG_CREDENTIAL] = azure_credential <27> current_app.config[CONFIG_BLOB_CLIENT] = blob_client <28> # Various approaches to integrate GPT and external knowledge, most applications will use a single one</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rrr": ReadRetrieveReadApproach( search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ) } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CLIENT = "blob_client" at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) ===========unchanged ref 1=========== at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) ===========changed ref 0=========== # module: app.backend.app # Replace these with your own values, either in environment variables or directly here AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content") AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") - AZURE_OPENAI_GPT_DEPLOYMENT = 
os.getenv("AZURE_OPENAI_GPT_DEPLOYMENT", "davinci") AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_CATEGORY = os.getenv("KB_FIELDS_CATEGORY", "category") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CLIENT = "blob_client" APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") bp = Blueprint("routes", __name__, static_folder='static')
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
e166911098414c9cb07dcd6acec63ba3278536d9
Remove defaults for getenv (#550)
<0>:<add> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <add> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <add> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <add> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <add> monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service") <add> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt") <add> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <add> monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada") <add> <del> # mock the DefaultAzureCredential
# module: tests.conftest @pytest_asyncio.fixture + async def client(monkeypatch): - async def client(): <0> # mock the DefaultAzureCredential <1> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <2> mock_default_azure_credential.return_value = MockAzureCredential() <3> quart_app = app.create_app() <4> <5> async with quart_app.test_app() as test_app: <6> quart_app.config.update( <7> { <8> "TESTING": True, <9> app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, <10> app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, <11> } <12> ) <13> <14> yield test_app.test_client() <15>
===========unchanged ref 0=========== at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: tests.conftest MockAzureCredential() at: unittest.mock _patcher(target: Any, new: _T, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[_T] _patcher(target: Any, *, spec: Optional[Any]=..., create: bool=..., spec_set: Optional[Any]=..., autospec: Optional[Any]=..., new_callable: Optional[Any]=..., **kwargs: Any) -> _patch[Union[MagicMock, AsyncMock]] at: unittest.mock.NonCallableMock.__get_return_value self.return_value = ret
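The fixture above yields a Quart test client with the environment stubbed via monkeypatch and DefaultAzureCredential mocked out. A test consuming it would then drive the HTTP API directly, roughly in the shape of the snapshot tests quoted later in this excerpt (the assertion on "answer" is illustrative):

```python
import pytest


@pytest.mark.asyncio
async def test_ask_returns_answer(client):
    # `client` is the fixture defined above; it already has env vars and
    # the Azure credential stubbed, so the request exercises only app code.
    response = await client.post(
        "/ask",
        json={"approach": "rtr", "question": "What is the capital of France?"},
    )
    assert response.status_code == 200
    result = await response.get_json()
    assert "answer" in result
```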
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
e166911098414c9cb07dcd6acec63ba3278536d9
Remove defaults for getenv (#550)
<0>:<add> blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] <del> blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) <1>:<add> blob = await blob_container_client.get_blob_client(path).download_blob() <del> blob = await blob_container.get_blob_client(path).download_blob()
# module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @bp.route("/content/<path>") async def content_file(path): <0> blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) <1> blob = await blob_container.get_blob_client(path).download_blob() <2> if not blob.properties or not blob.properties.has_key("content_settings"): <3> abort(404) <4> mime_type = blob.properties["content_settings"]["content_type"] <5> if mime_type == "application/octet-stream": <6> mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" <7> blob_file = io.BytesIO() <8> await blob.readinto(blob_file) <9> blob_file.seek(0) <10> return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) <11>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACHES = "ask_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app - # Replace these with your own values, either in environment variables or directly here - AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") - AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content") - AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") - AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") - AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") - AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") - AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") - AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") - - KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") - KB_FIELDS_CATEGORY = os.getenv("KB_FIELDS_CATEGORY", "category") - KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") - CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" + CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" - CONFIG_BLOB_CLIENT = "blob_client" - - APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") bp = Blueprint("routes", __name__, static_folder='static') ===========changed ref 1=========== # module: tests.conftest @pytest_asyncio.fixture + async def client(monkeypatch): - async def client(): + monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") + monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") + monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") + monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") + monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service") + monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt") + monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") + monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada") + - # mock the DefaultAzureCredential with 
mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: mock_default_azure_credential.return_value = MockAzureCredential() quart_app = app.create_app() async with quart_app.test_app() as test_app: quart_app.config.update( { "TESTING": True, app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, } ) yield test_app.test_client()
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
e166911098414c9cb07dcd6acec63ba3278536d9
Remove defaults for getenv (#550)
<0>:<add> # Replace these with your own values, either in environment variables or directly here <add> AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") <add> AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") <add> AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE") <add> AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX") <add> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <add> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <add> AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL") <add> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <add> <add> KB_FIELDS_CONTENT = os
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <1> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <2> # keys for each service <3> # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <4> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) <5> <6> # Set up clients for Cognitive Search and Storage <7> search_client = SearchClient( <8> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", <9> index_name=AZURE_SEARCH_INDEX, <10> credential=azure_credential) <11> blob_client = BlobServiceClient( <12> account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", <13> credential=azure_credential) <14> <15> # Used by the OpenAI SDK <16> openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" <17> openai.api_version = "2023-05-15" <18> openai.api_type = "azure_ad" <19> openai_token = await azure_credential.get_token( <20> "https://cognitiveservices.azure.com/.default" <21> ) <22> openai.api_key = openai_token.token <23> <24> # Store on app.config for later use inside requests <25> current_app.config[CONFIG_OPENAI_TOKEN] = openai_token <26> current_app.config[CONFIG_CREDENTIAL] = azure_credential <27> current_app.config[CONFIG_BLOB_CLIENT] = blob_client <28> # Various approaches to integrate GPT and external knowledge, most applications will use a single one</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rrr": ReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ) } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" at: app.backend.app.setup_clients AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX") AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL") AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) ===========unchanged ref 1=========== at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) ===========changed ref 0=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. 
@bp.route("/content/<path>") async def content_file(path): + blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] - blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) + blob = await blob_container_client.get_blob_client(path).download_blob() - blob = await blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() await blob.readinto(blob_file) blob_file.seek(0) return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) ===========changed ref 1=========== # module: app.backend.app - # Replace these with your own values, either in environment variables or directly here - AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") - AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content") - AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") - AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") - AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") - AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") - AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") - AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") - - KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") - KB_FIELDS_CATEGORY = os.getenv("KB_FIELDS_CATEGORY", "category") - KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") - CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" + CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" - CONFIG_BLOB_CLIENT = "blob_client" - - APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") bp = Blueprint("routes", __name__, static_folder='static')
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
e166911098414c9cb07dcd6acec63ba3278536d9
Remove defaults for getenv (#550)
<0>:<add> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <del> if APPLICATIONINSIGHTS_CONNECTION_STRING:
# module: app.backend.app def create_app(): <0> if APPLICATIONINSIGHTS_CONNECTION_STRING: <1> configure_azure_monitor() <2> AioHttpClientInstrumentor().instrument() <3> app = Quart(__name__) <4> app.register_blueprint(bp) <5> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) <6> <7> return app <8>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder='static') at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @bp.route("/content/<path>") async def content_file(path): + blob_container_client = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] - blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) + blob = await blob_container_client.get_blob_client(path).download_blob() - blob = await blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() await blob.readinto(blob_file) blob_file.seek(0) return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) ===========changed ref 1=========== # module: app.backend.app - # Replace these with your own values, either in environment variables or directly here - AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT", "mystorageaccount") - AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content") - AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") - AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") - AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE", "myopenai") - AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat") - AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") - AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT", "embedding") - - KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") - KB_FIELDS_CATEGORY = os.getenv("KB_FIELDS_CATEGORY", "category") - KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") - CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" + CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" - CONFIG_BLOB_CLIENT = "blob_client" - - APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") bp = Blueprint("routes", __name__, static_folder='static') ===========changed ref 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): + # Replace these with your own values, either in environment variables or directly here + AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") + AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") + AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE") + AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX") + AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") + AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") + AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL") + AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") + + KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") + 
KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") + # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) # Set up clients for Cognitive Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH</s> ===========changed ref 3=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 <s> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential) + blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-05-15" openai.api_type = "azure_ad" openai_token = await azure_credential.get_token( "https://cognitiveservices.azure.com/.default" ) openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token current_app.config[CONFIG_CREDENTIAL] = azure_credential + current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client - current_app.config[CONFIG_BLOB_CLIENT] = blob_client + # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPEN</s>
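create_app only turns on telemetry when the connection string is set, while the ASGI middleware is applied unconditionally. Spelled out with the imports the snippet relies on (the module paths below are the usual ones for these packages, not shown in this excerpt), and without the blueprint for brevity:

```python
import os

from quart import Quart

# Assumed import locations for the telemetry helpers used above.
from azure.monitor.opentelemetry import configure_azure_monitor
from opentelemetry.instrumentation.aiohttp_client import AioHttpClientInstrumentor
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware


def create_app() -> Quart:
    if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"):
        configure_azure_monitor()                  # exporter reads the env var
        AioHttpClientInstrumentor().instrument()   # trace outgoing aiohttp calls
    app = Quart(__name__)
    app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)  # trace incoming requests
    return app
```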
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
886f445370a94cff658a4300e4edf1f7de24039f
Allow for subdirectory structure within data directory (#569)
<1>:<add> for i, (content, pagenum) in enumerate(split_text(page_map, filename)): <del> for i, (content, pagenum) in enumerate(split_text(page_map)):
# module: scripts.prepdocs def create_sections(filename, page_map, use_vectors): <0> file_id = filename_to_id(filename) <1> for i, (content, pagenum) in enumerate(split_text(page_map)): <2> section = { <3> "id": f"{file_id}-page-{i}", <4> "content": content, <5> "category": args.category, <6> "sourcepage": blob_name_from_file_page(filename, pagenum), <7> "sourcefile": filename <8> } <9> if use_vectors: <10> section["embedding"] = compute_embedding(content) <11> yield section <12>
===========unchanged ref 0=========== at: scripts.prepdocs blob_name_from_file_page(filename, page=0) split_text(page_map, filename) filename_to_id(filename) compute_embedding(text) args = parser.parse_args()
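create_sections derives each document id from filename_to_id, whose body is not included in this excerpt; the unchanged refs only hint that the module uses re.sub and base64.b16encode. One plausible shape for such a helper, purely illustrative, that keeps ids both human readable and collision free:

```python
import base64
import re


def filename_to_id(filename: str) -> str:
    # Keep only characters that are safe in a search index document key,
    # and append an encoding of the full name so distinct files never collide.
    # Illustrative sketch; the real implementation may differ.
    filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
    filename_hash = base64.b16encode(filename.encode("utf-8")).decode("ascii")
    return f"file-{filename_ascii}-{filename_hash}"
```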
scripts.prepdocs/read_files
Modified
Azure-Samples~azure-search-openai-demo
e1a077d29b3f33e182b540d1219b9528778d1be2
Indexing: Improve vectors throughput (#556)
<11>:<add> read_files(filename + "/*", use_vectors, vectors_batch_support) <del> read_files(filename + "/*", use_vectors) <17>:<add> sections = create_sections(os.path.basename(filename), page_map, use_vectors and not vectors_batch_support) <del> sections = create_sections(os.path.basename(filename), page_map, use_vectors) <18>:<add> print (use_vectors and vectors_batch_support) <add> if use_vectors and vectors_batch_support: <add> sections = update_embeddings_in_batch(sections)
# module: scripts.prepdocs + def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool): - def read_files(path_pattern: str, use_vectors: bool): <0> """ <1> Recursively read directory structure under `path_pattern` <2> and execute indexing for the individual files <3> """ <4> for filename in glob.glob(path_pattern): <5> if args.verbose: print(f"Processing '{filename}'") <6> if args.remove: <7> remove_blobs(filename) <8> remove_from_index(filename) <9> else: <10> if os.path.isdir(filename): <11> read_files(filename + "/*", use_vectors) <12> continue <13> try: <14> if not args.skipblobs: <15> upload_blobs(filename) <16> page_map = get_document_text(filename) <17> sections = create_sections(os.path.basename(filename), page_map, use_vectors) <18> index_sections(os.path.basename(filename), sections) <19> except Exception as e: <20> print(f"\tGot an error while reading {filename} -> {e} --> skipping file") <21>
===========unchanged ref 0=========== at: scripts.prepdocs args = parser.parse_args() search_creds = default_creds if args.searchkey is None else AzureKeyCredential(args.searchkey) at: scripts.prepdocs.update_embeddings_in_batch copy_s = [] batch_response[item["id"]] = emb batch_response = {} ===========changed ref 0=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding_in_batch(texts): + refresh_openai_token() + emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) + return [data.embedding for data in emb_response.data] + ===========changed ref 1=========== # module: scripts.prepdocs + def calculate_tokens_emb_aoai(input: str): + encoding = tiktoken.encoding_for_model(args.openaimodelname) + return len(encoding.encode(input)) + ===========changed ref 2=========== # module: scripts.prepdocs + def update_embeddings_in_batch(sections): + batch_queue = [] + copy_s = [] + batch_response = {} + token_count = 0 + for s in sections: + token_count += calculate_tokens_emb_aoai(s["content"]) + if token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]['token_limit'] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]['max_batch_size']: + batch_queue.append(s) + copy_s.append(s) + else: + emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) + if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") + for emb, item in zip(emb_responses, batch_queue): + batch_response[item["id"]] = emb + batch_queue = [] + batch_queue.append(s) + token_count = calculate_tokens_emb_aoai(s["content"]) + + if batch_queue: + emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue]) + if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}") + for emb, item in zip(emb_responses, batch_queue): + batch_response[item["id"]] = emb + + for s in copy_s: + s["embedding"] = batch_response[s["id"]] + yield s + ===========changed ref 3=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' + #Embedding batch support section + SUPPORTED_BATCH_AOAI_MODEL = { + 'text-embedding-ada-002': { + 'token_limit' : 8100, + 'max_batch_size' : 16 + } + } +
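The batching change packs section texts into embedding calls while honoring both the token limit and the max batch size from SUPPORTED_BATCH_AOAI_MODEL. The core bookkeeping, pulled out into a standalone sketch (limits copied from the table above for text-embedding-ada-002):

```python
import tiktoken

TOKEN_LIMIT = 8100      # SUPPORTED_BATCH_AOAI_MODEL['text-embedding-ada-002']['token_limit']
MAX_BATCH_SIZE = 16     # SUPPORTED_BATCH_AOAI_MODEL['text-embedding-ada-002']['max_batch_size']


def batch_texts(texts: list[str], model_name: str = "text-embedding-ada-002") -> list[list[str]]:
    """Group texts into batches that stay under the token and size limits."""
    encoding = tiktoken.encoding_for_model(model_name)
    batches: list[list[str]] = []
    current: list[str] = []
    token_count = 0
    for text in texts:
        tokens = len(encoding.encode(text))
        # Flush the current batch before it would exceed either limit.
        if current and (token_count + tokens > TOKEN_LIMIT or len(current) >= MAX_BATCH_SIZE):
            batches.append(current)
            current, token_count = [], 0
        current.append(text)
        token_count += tokens
    if current:
        batches.append(current)
    return batches
```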
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
b4c45b12db4788d2dff62ba7b18bf0329f85b603
Adds optional streaming support for chat requests (#532)
<14>:<add> quart_app.config.update({"TESTING": True}) <del> quart_app.config.update( <15>:<del> { <16>:<del> "TESTING": True, <17>:<del> app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, <18>:<del> app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, <19>:<del> } <20>:<del> )
# module: tests.conftest @pytest_asyncio.fixture + async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search): - async def client(monkeypatch): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service") <5> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt") <6> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <7> monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada") <8> <9> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <10> mock_default_azure_credential.return_value = MockAzureCredential() <11> quart_app = app.create_app() <12> <13> async with quart_app.test_app() as test_app: <14> quart_app.config.update( <15> { <16> "TESTING": True, <17> app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, <18> app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, <19> } <20> ) <21> <22> yield test_app.test_client() <23>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: _pytest.monkeypatch.MonkeyPatch setattr(target: object, name: str, value: object, raising: bool=...) -> None setattr(target: str, name: object, value: Notset=..., raising: bool=...) -> None at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.util convert_to_openai_object(resp, api_key=None, api_version=None, organization=None, engine=None, plain_old_data=False) at: tests.conftest.mock_openai_chatcompletion AsyncChatCompletionIterator(answer) ===========changed ref 0=========== # module: tests.conftest - class MockedChatApproach(ChatReadRetrieveReadApproach): - def __init__(self): - pass - ===========changed ref 1=========== # module: tests.conftest - class MockedAskApproach(AskApproach): - def run(self, question, overrides): - assert question == "What is the capital of France?" - return {"answer": "Paris"} - ===========changed ref 2=========== # module: tests.conftest + @pytest.fixture + def mock_openai_embedding(monkeypatch): + async def mock_acreate(*args, **kwargs): + return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} + + monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) + ===========changed ref 3=========== # module: tests.conftest - class MockedChatApproach(ChatReadRetrieveReadApproach): - def run(self, history, overrides): - messages = ChatReadRetrieveReadApproach.get_messages_from_history(self, ChatReadRetrieveReadApproach.query_prompt_template, "gpt-3.5-turbo", history, "Generate search query") - assert messages[0]["role"] == "system" - assert messages[1]["content"] == "Generate search query" - assert messages[1]["role"] == "user" - return {"answer": "Paris", "data_points": [], "thoughts": ""} - ===========changed ref 4=========== # module: tests.conftest + @pytest.fixture + def mock_openai_chatcompletion(monkeypatch): + class AsyncChatCompletionIterator: + def __init__(self, answer): + self.num = 1 + self.answer = answer + + def __aiter__(self): + return self + + async def __anext__(self): + if self.num == 1: + self.num = 0 + return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) + else: + raise StopAsyncIteration + + async def mock_acreate(*args, **kwargs): + messages = kwargs["messages"] + if messages[-1]["content"] == "Generate search query for: What is the capital of France?": + answer = "capital of France" + else: + answer = "The capital of France is Paris." 
+ if "stream" in kwargs and kwargs["stream"] is True: + return AsyncChatCompletionIterator(answer) + else: + return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) + + monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) + ===========changed ref 5=========== # module: app.backend.approaches.approach - class ChatApproach(ABC): - @abstractmethod - async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: - ... -
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
b4c45b12db4788d2dff62ba7b18bf0329f85b603
Adds optional streaming support for chat requests (#532)
<11>:<add> r = await impl.run_without_streaming(request_json["history"], request_json.get("overrides", {})) <del> r = await impl.run(request_json["history"], request_json.get("overrides") or {})
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await impl.run(request_json["history"], request_json.get("overrides") or {}) <12> return jsonify(r) <13> except Exception as e: <14> logging.exception("Exception in /chat") <15> return jsonify({"error": str(e)}), 500 <16>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_CHAT_APPROACHES = "chat_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: tests.conftest - class MockedChatApproach(ChatReadRetrieveReadApproach): - def __init__(self): - pass - ===========changed ref 1=========== # module: app.backend.approaches.approach - class ChatApproach(ABC): - @abstractmethod - async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: - ... - ===========changed ref 2=========== # module: tests.conftest - class MockedAskApproach(AskApproach): - def run(self, question, overrides): - assert question == "What is the capital of France?" 
- return {"answer": "Paris"} - ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_with_unknown_approach(client): + response = await client.post("/chat_stream", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_request_must_be_json(client): + response = await client.post("/chat_stream") + assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" + ===========changed ref 5=========== # module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach: - class ChatReadRetrieveReadApproach(ChatApproach): + def run_with_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> AsyncGenerator[dict, None]: + extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True) + yield extra_info + async for event in await chat_coroutine: + yield event + ===========changed ref 6=========== # module: tests.conftest + @pytest.fixture + def mock_openai_embedding(monkeypatch): + async def mock_acreate(*args, **kwargs): + return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} + + monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) + ===========changed ref 7=========== # module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach: - class ChatReadRetrieveReadApproach(ChatApproach): + def run_without_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> dict[str, Any]: + extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False) + chat_content = (await chat_coroutine).choices[0].message.content + extra_info["answer"] = chat_content + return extra_info + ===========changed ref 8=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_mock_approach(client): - response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - assert response.status_code == 200 - result = await response.get_json() - assert result["answer"] == "Paris" - ===========changed ref 9=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_mock_approach(client): - response = await client.post( - "/chat", - json={ - "approach": "mock", - "history": [{"user": "What is the capital of France?"}], - }, - ) - assert response.status_code == 200 - result = await response.get_json() - assert result["answer"] == "Paris" - ===========changed ref 10=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_stream_text(client, snapshot): + response = await client.post( + "/chat_stream", + json={ + "approach": "rrr", + "history": [{"user": "What is the capital of France?"}], + "overrides": {"retrieval_mode": "text"}, + }, + ) + assert response.status_code == 200 + result = await response.get_data() + snapshot.assert_match(result, "result.jsonlines") + ===========changed ref 11=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_rtr_text(client, snapshot): + response = await client.post( + "/ask", + json={ + "approach": "rtr", + "question": "What is the capital of France?", + "overrides": {"retrieval_mode": "text"}, + }, + ) + assert response.status_code == 200 + result = await response.get_json() + snapshot.assert_match(json.dumps(result, indent=4), "result.json") + 
===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_format_as_ndjson(): + async def gen(): + yield {"a": "I ❤️ 🐍"} + yield {"b": "Newlines inside \n strings are fine"} + + result = [line async for line in format_as_ndjson(gen())] + assert result == ['{"a": "I ❤️ 🐍"}\n', '{"b": "Newlines inside \\n strings are fine"}\n'] +
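run_without_streaming and run_with_streaming in the changed refs share run_until_final_call and differ only in how the ChatCompletion result is consumed. The two consumption patterns in isolation, using the pre-1.0 openai SDK calls the app relies on (deployment name and temperature are placeholders):

```python
from typing import AsyncGenerator

import openai


async def answer_once(messages: list[dict]) -> str:
    # Non-streaming: await the completion and read the whole message at once.
    completion = await openai.ChatCompletion.acreate(
        deployment_id="chat", messages=messages, temperature=0.7
    )
    return completion.choices[0].message.content


async def answer_streamed(messages: list[dict]) -> AsyncGenerator[dict, None]:
    # Streaming: the awaited call yields delta chunks as they arrive,
    # which the route can forward to the client one by one.
    async for event in await openai.ChatCompletion.acreate(
        deployment_id="chat", messages=messages, temperature=0.7, stream=True
    ):
        yield event
```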
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
56d60ecaae1d125375b22d7af958913e53fc07b8
Fix prepdocs compatibility with openai key and add test (#605)
<10>:<add> section["embedding"] = compute_embedding(content, embedding_deployment) <del> section["embedding"] = compute_embedding(content)
# module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): - def create_sections(filename, page_map, use_vectors): <0> file_id = filename_to_id(filename) <1> for i, (content, pagenum) in enumerate(split_text(page_map, filename)): <2> section = { <3> "id": f"{file_id}-page-{i}", <4> "content": content, <5> "category": args.category, <6> "sourcepage": blob_name_from_file_page(filename, pagenum), <7> "sourcefile": filename <8> } <9> if use_vectors: <10> section["embedding"] = compute_embedding(content) <11> yield section <12>
===========unchanged ref 0=========== at: base64 b16encode(s: _encodable) -> bytes at: re sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr at: scripts.prepdocs split_text(page_map, filename) at: scripts.prepdocs.split_text find_page(offset) all_text = "".join(p[2] for p in page_map) start = min(end - SECTION_OVERLAP, start + last_table_start) start = 0 start = last_word start = end - SECTION_OVERLAP start -= 1 end = length end = start + MAX_SECTION_LENGTH end = last_word # Fall back to at least keeping a whole word end += 1 ===========changed ref 0=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False) + MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' #Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = { 'text-embedding-ada-002': { 'token_limit' : 8100, 'max_batch_size' : 16 } }
scripts.prepdocs/compute_embedding
Modified
Azure-Samples~azure-search-openai-demo
56d60ecaae1d125375b22d7af958913e53fc07b8
Fix prepdocs compatibility with openai key and add test (#605)
<1>:<add> return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] <del> return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"]
<s>prepdocs + @retry(retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text, embedding_deployment): - def compute_embedding(text): <0> refresh_openai_token() <1> return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] <2>
===========unchanged ref 0=========== at: scripts.prepdocs compute_embedding(text, embedding_deployment) ===========changed ref 0=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): - def create_sections(filename, page_map, use_vectors): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment) - section["embedding"] = compute_embedding(content) yield section ===========changed ref 1=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False) + MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' #Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = { 'text-embedding-ada-002': { 'token_limit' : 8100, 'max_batch_size' : 16 } }
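compute_embedding now retries only on rate-limit errors. The full tenacity wiring, including a before_retry_sleep hook of the kind the decorator references (its real body is not shown in this excerpt, so the print below is an assumption):

```python
import openai
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_random_exponential,
)


def before_retry_sleep(retry_state):
    # Called by tenacity just before each backoff sleep; the real hook may log differently.
    print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")


@retry(
    retry=retry_if_exception_type(openai.error.RateLimitError),  # only retry 429s
    wait=wait_random_exponential(min=15, max=60),                # randomized exponential backoff
    stop=stop_after_attempt(15),                                 # give up after 15 tries
    before_sleep=before_retry_sleep,
)
def compute_embedding(text: str, embedding_deployment: str) -> list:
    return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"]
```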
scripts.prepdocs/refresh_openai_token
Modified
Azure-Samples~azure-search-openai-demo
56d60ecaae1d125375b22d7af958913e53fc07b8
Fix prepdocs compatibility with openai key and add test (#605)
<0>:<add> """ <add> Refresh OpenAI token every 5 minutes <add> """ <add> if CACHE_KEY_TOKEN_TYPE in open_ai_token_cache and open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time(): <del> if open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time():
# module: scripts.prepdocs - # refresh open ai token every 5 minutes def refresh_openai_token(): <0> if open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time(): <1> token_cred = open_ai_token_cache[CACHE_KEY_TOKEN_CRED] <2> openai.api_key = token_cred.get_token("https://cognitiveservices.azure.com/.default").token <3> open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time() <4>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace(verbose=False) args = parser.parse_args() at: scripts.prepdocs.remove_from_index search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds) at: time sleep(secs: float) -> None ===========changed ref 0=========== <s>prepdocs + @retry(retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text, embedding_deployment): - def compute_embedding(text): refresh_openai_token() + return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): - def create_sections(filename, page_map, use_vectors): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment) - section["embedding"] = compute_embedding(content) yield section ===========changed ref 2=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False) + MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' #Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = { 'text-embedding-ada-002': { 'token_limit' : 8100, 'max_batch_size' : 16 } }
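refresh_openai_token only acts when the cache records that the process authenticated with Azure AD, which implies the cache is seeded once up front. The seeding code is not part of this excerpt; a sketch of what it plausibly looks like, mirroring the keys the refresh function reads (any azure.identity credential would do, DefaultAzureCredential is used here only for illustration):

```python
import time

import openai
from azure.identity import DefaultAzureCredential

open_ai_token_cache = {}
CACHE_KEY_TOKEN_CRED = "openai_token_cred"
CACHE_KEY_CREATED_TIME = "created_time"
CACHE_KEY_TOKEN_TYPE = "token_type"


def seed_openai_token_cache(credential: DefaultAzureCredential) -> None:
    # Fetch the first token and remember when and how it was obtained so
    # refresh_openai_token can renew it every five minutes.
    token = credential.get_token("https://cognitiveservices.azure.com/.default")
    openai.api_type = "azure_ad"
    openai.api_key = token.token
    open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
    open_ai_token_cache[CACHE_KEY_TOKEN_CRED] = credential
    open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] = "azure_ad"
```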
scripts.prepdocs/read_files
Modified
Azure-Samples~azure-search-openai-demo
56d60ecaae1d125375b22d7af958913e53fc07b8
Fix prepdocs compatibility with openai key and add test (#605)
<17>:<add> sections = create_sections(os.path.basename(filename), page_map, use_vectors and not vectors_batch_support, embedding_deployment) <del> sections = create_sections(os.path.basename(filename), page_map, use_vectors and not vectors_batch_support) <18>:<del> print (use_vectors and vectors_batch_support)
# module: scripts.prepdocs + def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None): - def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool): <0> """ <1> Recursively read directory structure under `path_pattern` <2> and execute indexing for the individual files <3> """ <4> for filename in glob.glob(path_pattern): <5> if args.verbose: print(f"Processing '{filename}'") <6> if args.remove: <7> remove_blobs(filename) <8> remove_from_index(filename) <9> else: <10> if os.path.isdir(filename): <11> read_files(filename + "/*", use_vectors, vectors_batch_support) <12> continue <13> try: <14> if not args.skipblobs: <15> upload_blobs(filename) <16> page_map = get_document_text(filename) <17> sections = create_sections(os.path.basename(filename), page_map, use_vectors and not vectors_batch_support) <18> print (use_vectors and vectors_batch_support) <19> if use_vectors and vectors_batch_support: <20> sections = update_embeddings_in_batch(sections) <21> index_sections(os.path.basename(filename), sections) <22> except Exception as e: <23> print(f"\tGot an error while reading {filename} -> {e} --> skipping file") <24>
===========unchanged ref 0=========== at: glob glob(pathname: AnyStr, *, recursive: bool=...) -> List[AnyStr] at: openai api_key = os.environ.get("OPENAI_API_KEY") at: os.path isdir(s: AnyPath) -> bool at: scripts.prepdocs args = argparse.Namespace(verbose=False) args = parser.parse_args() open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' remove_blobs(filename) remove_from_index(filename) at: time time() -> float ===========changed ref 0=========== <s>prepdocs + @retry(retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text, embedding_deployment): - def compute_embedding(text): refresh_openai_token() + return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs - # refresh open ai token every 5 minutes def refresh_openai_token(): + """ + Refresh OpenAI token every 5 minutes + """ + if CACHE_KEY_TOKEN_TYPE in open_ai_token_cache and open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time(): - if open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time(): token_cred = open_ai_token_cache[CACHE_KEY_TOKEN_CRED] openai.api_key = token_cred.get_token("https://cognitiveservices.azure.com/.default").token open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time() ===========changed ref 2=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): - def create_sections(filename, page_map, use_vectors): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment) - section["embedding"] = compute_embedding(content) yield section ===========changed ref 3=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False) + MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = 'openai_token_cred' CACHE_KEY_CREATED_TIME = 'created_time' CACHE_KEY_TOKEN_TYPE = 'token_type' #Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = { 'text-embedding-ada-002': { 'token_limit' : 8100, 'max_batch_size' : 16 } }
tests.test_app/test_format_as_ndjson
Modified
Azure-Samples~azure-search-openai-demo
583b56229baf679c3665c29eb436c659bb232bf2
Use environ vs getenv (#611)
<4>:<add> result = [line async for line in app.format_as_ndjson(gen())] <del> result = [line async for line in format_as_ndjson(gen())]
# module: tests.test_app @pytest.mark.asyncio async def test_format_as_ndjson(): <0> async def gen(): <1> yield {"a": "I ❤️ 🐍"} <2> yield {"b": "Newlines inside \n strings are fine"} <3> <4> result = [line async for line in format_as_ndjson(gen())] <5> assert result == ['{"a": "I ❤️ 🐍"}\n', '{"b": "Newlines inside \\n strings are fine"}\n'] <6>
===========unchanged ref 0=========== at: tests.test_app.test_chat_stream_text response = await client.post( "/chat_stream", json={ "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_missing_env_vars(): + quart_app = app.create_app() + + with pytest.raises(quart.testing.app.LifespanError) as exc_info: + async with quart_app.test_app() as test_app: + test_app.test_client() + assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''" +
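The updated test calls the helper through the app module. A rough sketch of what such an NDJSON formatter can look like (the real implementation in app.py may differ), serializing each yielded dict as one JSON line:

import json
from typing import AsyncGenerator

async def format_as_ndjson(results: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # One JSON object per line; ensure_ascii=False keeps characters such as emoji readable,
    # matching the expected output asserted in the test above.
    async for event in results:
        yield json.dumps(event, ensure_ascii=False) + "\n"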
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
583b56229baf679c3665c29eb436c659bb232bf2
Use environ vs getenv (#611)
<1>:<add> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <del> AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") <2>:<add> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <del> AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") <3>:<add> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <del> AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE") <4>:<add> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <del> AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX") <5>:<add> AZURE_OPENAI_SERVICE = os.environ["AZURE_OPENAI_SERVICE"] <del> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <6>:<add> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ["AZURE_OPENAI_CHATGPT_DEPLOYMENT"] <del> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <7>:<add> AZURE_OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <del> AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL") <8>:<add> AZURE_OPENAI_EMB_DEPLOYMENT = os.environ["AZURE_OPENAI_EMB_DEPLOYMENT"] <del> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT")
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.getenv("AZURE_STORAGE_ACCOUNT") <2> AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER") <3> AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE") <4> AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX") <5> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <6> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <7> AZURE_OPENAI_CHATGPT_MODEL = os.getenv("AZURE_OPENAI_CHATGPT_MODEL") <8> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <9> <10> KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") <11> KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") <12> <13> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <14> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <15> # keys for each service <16> # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <17> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) <18> <19> # Set up clients for Cognitive Search and Storage <20> search_client = SearchClient( <21> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", </s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 credential=azure_credential) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-05-15" openai.api_type = "azure_ad" openai_token = await azure_credential.get_token( "https://cognitiveservices.azure.com/.default" ) openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rrr": ReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE,</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s>_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT ) } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" bp = Blueprint("routes", __name__, static_folder='static') at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() ===========unchanged ref 1=========== getenv(key: str, default: _T) -> 
Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_missing_env_vars(): + quart_app = app.create_app() + + with pytest.raises(quart.testing.app.LifespanError) as exc_info: + async with quart_app.test_app() as test_app: + test_app.test_client() + assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''" + ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_format_as_ndjson(): async def gen(): yield {"a": "I ❤️ 🐍"} yield {"b": "Newlines inside \n strings are fine"} + result = [line async for line in app.format_as_ndjson(gen())] - result = [line async for line in format_as_ndjson(gen())] assert result == ['{"a": "I ❤️ 🐍"}\n', '{"b": "Newlines inside \\n strings are fine"}\n']
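The switch from os.getenv to os.environ for required settings makes a missing variable raise KeyError at startup, which the test_missing_env_vars test above surfaces as a lifespan failure, instead of silently passing None into the clients. A short sketch of the distinction:

import os

# Required setting: raises KeyError immediately if the variable is unset.
storage_account = os.environ["AZURE_STORAGE_ACCOUNT"]

# Optional setting with a fallback: os.getenv never raises.
content_field = os.getenv("KB_FIELDS_CONTENT", "content")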
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
de2f451b171e537c7dd4a5a853affaab0bae00ac
Update README.md with logging info (#610)
<6>:<add> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels <add> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", "ERROR")) <del>
# module: app.backend.app def create_app(): <0> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <1> configure_azure_monitor() <2> AioHttpClientInstrumentor().instrument() <3> app = Quart(__name__) <4> app.register_blueprint(bp) <5> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) <6> <7> return app <8>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder='static') at: logging basicConfig(*, filename: Optional[StrPath]=..., filemode: str=..., format: str=..., datefmt: Optional[str]=..., style: str=..., level: Optional[_Level]=..., stream: Optional[IO[str]]=..., handlers: Optional[Iterable[Handler]]=...) -> None at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
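The added basicConfig call reads a level name from APP_LOG_LEVEL and defaults to ERROR. A standalone sketch of the same configuration:

import logging
import os

# basicConfig accepts level names such as "DEBUG", "INFO", "WARNING" or "ERROR".
logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", "ERROR"))
logging.getLogger(__name__).info("only emitted when APP_LOG_LEVEL is INFO or lower")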
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_until_final_call
Modified
Azure-Samples~azure-search-openai-demo
2ba024a76108729a0db812fc5b47ad7c58a6b8e0
Fix prompt_template override, add tests (#606)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> user_q = 'Generate search query for: ' + history[-1]["user"] <8> <9> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <10> messages = self.get_messages_from_history( <11> self.query_prompt_template, <12> self.chatgpt_model, <13> history, <14> user_q, <15> self.query_prompt_few_shots, <16> self.chatgpt_token_limit - len(user_q) <17> ) <18> <19> chat_completion = await openai.ChatCompletion.acreate( <20> deployment_id=self.chatgpt_deployment, <21> model=self.chatgpt_model, <22> messages=messages, <23> temperature=0.0, <24> max_tokens=32, <25> n=1) <26> <27> query_text = chat_completion.choices[0].message.content <28> if query_text.strip() == "0": <29> query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query <30> <31> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <32> <33> # If</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: # offset: 1 if has_vector: query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: r = await self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) async for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonew</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: # offset: 2 <s> async for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_override") if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history( system_message, self.chatgpt_model, history, history[-1]["user"]+ "\n\nSources:\n" + content, # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt. 
max_tokens=self.chatgpt_token_limit) msg_to_display = '\n\n'.join([</s> ===========below chunk 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: # offset: 3 <s>message) for message in messages]) extra_info = {"data_points": results, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} chat_coroutine = openai.ChatCompletion.acreate( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stream=should_stream) return (extra_info, chat_coroutine) ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
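The override handling shown above either keeps the built-in system prompt, injects extra text into it when the override starts with ">>>", or replaces it entirely. A compact sketch of that branching, with illustrative names:

from typing import Optional

def build_system_message(base_template: str, follow_up_prompt: str, prompt_override: Optional[str]) -> str:
    if prompt_override is None:
        # No override: built-in template with nothing injected.
        return base_template.format(injected_prompt="", follow_up_questions_prompt=follow_up_prompt)
    if prompt_override.startswith(">>>"):
        # ">>>" prefix: keep the built-in template and inject the remainder into it.
        return base_template.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_prompt)
    # Anything else replaces the template entirely.
    return prompt_override.format(follow_up_questions_prompt=follow_up_prompt)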
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.retrieve
Modified
Azure-Samples~azure-search-openai-demo
38dd16c0a39dbc54a37f2b0124b7f99be3b54817
Run black on all Python files (#632)
<9>:<add> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) <del> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <10>:<add> query_vector = embedding["data"][0]["embedding"] <19>:<add> r = await self.search_client.search( <del> r = await self.search_client.search(query_text, <20>:<add> query_text, <add> filter=filter, <del> filter=filter, <21>:<add> query_type=QueryType.SEMANTIC, <del> query_type=QueryType.SEMANTIC, <22>:<add> query_language="en-us", <del> query_language="en-us", <23>:<add> query_speller="lexicon", <del> query_speller="lexicon", <24>:<add> semantic_configuration_name="default", <del> semantic_configuration_name="default", <25>:<add> top=top, <del> top = top, <26>:<add> query_caption="extractive|highlight-false" if use_semantic_captions else None, <del> query_caption="extractive|highlight-false" if use_semantic_captions else None, <27>:<add> vector=query_vector, <del> vector=query_vector, <28>:<add> top_k=50 if query_vector else None, <del> top_k=50 if query_vector else None, <29>:<add> vector_fields="embedding" if query_vector else None, <del> vector_fields="embedding" if query_vector else None) <30>:<add> )
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> if not has_text: <15> query_text = "" <16> <17> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <18> if overrides.get("semantic_ranker") and has_text: <19> r = await self.search_client.search(query_text, <20> filter=filter, <21> query_type=QueryType.SEMANTIC, <22> query_language="en-us", <23> query_speller="lexicon", <24> semantic_configuration_name="default", <25> top = top, <26> query_caption="extractive|highlight-false" if use_semantic_captions else None, <27> vector=query_vector, <28> top_k=50 if query_vector else None, <29> vector_fields="embedding" if query_vector else None) <30> else</s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 r = await self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) async for doc in r] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] content = "\n".join(results) return results, content ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " "Answer the question using only the data provided in the information sources below. " "For tabular information return it as an html table. Do not return markdown format. " "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " 'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" ' 'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). ' 'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". ' "Never quote tool names as sources." "If you cannot answer using the sources below, say that you don't know. " "\n\nYou can access to the following tools:" ) template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 1=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): """ Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information is present then formulate an answer. Each iteration consists of two parts: 1. use GPT to see if we need more information 2. if more data is needed, use the requested "tool" to retrieve it. The last call to GPT answers the actual question. This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. [1] E. Karpas, et al. 
arXiv:2205.00445 """ + template_prefix = ( - template_prefix = \ + "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " - "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ + "Answer the question using only the data provided in the information sources below. " - "Answer the question using only the data provided in the information sources below. " \ + "For tabular information return it as an html table. Do not return markdown format. " - "For tabular information return it as an html table. Do not return markdown format. " \ + "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " - "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ + 'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" ' - "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]</s> ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): # offset: 1 <s> information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ + 'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). ' - "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ + 'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". ' - "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ + "Never quote tool names as sources." - "Never quote tool names as sources." \ + "If you cannot answer using the sources below, say that you don't know. " - "If you cannot answer using the sources below, say that you don't know. " \ + "\n\nYou can access to the following tools:" - "\n\nYou can access to the following tools:" + ) template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
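Across these approaches the retrieval_mode override decides whether the text query, the vector, or both are sent to Cognitive Search. A small sketch of that selection logic on its own (None is treated as hybrid, as in the checks above):

from typing import List, Optional, Tuple

def split_query(retrieval_mode: Optional[str], query_text: str, query_vector: List[float]) -> Tuple[Optional[str], Optional[List[float]]]:
    has_text = retrieval_mode in ["text", "hybrid", None]
    has_vector = retrieval_mode in ["vectors", "hybrid", None]
    # Drop whichever component the chosen mode does not use.
    return (query_text if has_text else None, query_vector if has_vector else None)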
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_until_final_call
Modified
Azure-Samples~azure-search-openai-demo
38dd16c0a39dbc54a37f2b0124b7f99be3b54817
Run black on all Python files (#632)
<7>:<add> user_q = "Generate search query for: " + history[-1]["user"] <del> user_q = 'Generate search query for: ' + history[-1]["user"] <16>:<add> self.chatgpt_token_limit - len(user_q), <del> self.chatgpt_token_limit - len(user_q) <17>:<add> ) <del> ) <25>:<add> n=1, <del> n=1) <26>:<add> ) <29>:<add> query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query <del> query_text =
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: + def run_until_final_call( + self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False - def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: + ) -> tuple: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> user_q = 'Generate search query for: ' + history[-1]["user"] <8> <9> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <10> messages = self.get_messages_from_history( <11> self.query_prompt_template, <12> self.chatgpt_model, <13> history, <14> user_q, <15> self.query_prompt_few_shots, <16> self.chatgpt_token_limit - len(user_q) <17> ) <18> <19> chat_completion = await openai.ChatCompletion.acreate( <20> deployment_id=self.chatgpt_deployment, <21> model=self.chatgpt_model, <22> messages=messages, <23> temperature=0.0, <24> max_tokens=32, <25> n=1) <26> <27> query_text = chat_completion.choices[0].message.content <28> if query_text.strip() == "0": <29> query_text =</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: + def run_until_final_call( + self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False - def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: + ) -> tuple: # offset: 1 # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query if has_vector: query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: r = await self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: + def run_until_final_call( + self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False - def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: + ) -> tuple: # offset: 2 <s>_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . 
".join([c.text for c in doc['@search.captions']])) async for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_</s> ===========below chunk 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: + def run_until_final_call( + self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False - def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple: + ) -> tuple: # offset: 3 <s> prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history( system_message, self.chatgpt_model, history, history[-1]["user"]+ "\n\nSources:\n" + content, # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt. max_tokens=self.chatgpt_token_limit) msg_to_display = '\n\n'.join([str(message) for message in messages]) extra_info = {"data_points": results, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} chat_coroutine = openai.ChatCompletion.acreate( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stream=should_stream) return (extra_info, chat_coroutine) ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. 
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
38dd16c0a39dbc54a37f2b0124b7f99be3b54817
Run black on all Python files (#632)
<9>:<add> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=q) <del> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=q))["data"][0]["embedding"] <10>:<add> query_vector = embedding["data"][0]["embedding"] <18>:<add> r = await self.search_client.search( <del> r = await self.search_client.search(query_text, <19>:<add> query_text, <add> filter=filter, <del> filter=filter, <20>:<add> query_type=QueryType.SEMANTIC, <del> query_type=QueryType.SEMANTIC, <21>:<add> query_language="en-us", <del> query_language="en-us", <22>:<add> query_speller="lexicon", <del> query_speller="lexicon", <23>:<add> semantic_configuration_name="default", <del> semantic_configuration_name="default", <24>:<add> top=top, <del> top=top, <25>:<add> query_caption="extractive|highlight-false" if use_semantic_captions else None, <del> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26>:<add> vector=query_vector, <del> vector=query_vector, <27>:<add> top_k=50 if query_vector else None, <del> top_k=50 if query_vector else None, <28>:<add> vector_fields="embedding" if query_vector else None, <del> vector_fields="embedding" if query_vector else None) <29>:<add> )
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=q))["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> query_text = q if has_text else "" <15> <16> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <17> if overrides.get("semantic_ranker") and has_text: <18> r = await self.search_client.search(query_text, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26> vector=query_vector, <27> top_k=50 if query_vector else None, <28> vector_fields="embedding" if query_vector else None) <29> else: </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) async for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder(overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message('user', user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message('assistant', self.answer) message_builder.append_message('user', self.question) messages = message_builder.messages chat_completion = await openai.ChatCompletion.acreate( deployment_id=self.openai_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1) return {"data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + '\n\n'.join([str(message</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 2 <s> f"Question:<br>{query_text}<br><br>Prompt:<br>" + '\n\n'.join([str(message) for message in messages])} ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. Use below example to answer" question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." 
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment ===========unchanged ref 1=========== self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": system_content}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 2=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
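The run method above assembles the chat prompt with MessageBuilder, placing the few-shot question/answer pair between the system prompt and the real user turn. A rough plain-list equivalent (the real MessageBuilder also handles token counting and insertion order):

from typing import Dict, List

def build_ask_messages(system_prompt: str, question: str, sources: str, shot_question: str, shot_answer: str) -> List[Dict[str, str]]:
    # System message first, then the sample exchange, then the actual question with its sources.
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": shot_question},
        {"role": "assistant", "content": shot_answer},
        {"role": "user", "content": question + "\nSources:\n" + sources},
    ]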
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.search
Modified
Azure-Samples~azure-search-openai-demo
38dd16c0a39dbc54a37f2b0124b7f99be3b54817
Run black on all Python files (#632)
<9>:<add> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) <del> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <10>:<add> query_vector = embedding["data"][0]["embedding"] <18>:<add> r = await self.search_client.search( <del> r = await self.search_client.search(query_text, <19>:<add> query_text, <add> filter=filter, <del> filter=filter, <20>:<add> query_type=QueryType.SEMANTIC, <del> query_type=QueryType.SEMANTIC, <21>:<add> query_language="en-us", <del> query_language="en-us", <22>:<add> query_speller="lexicon", <del> query_speller="lexicon", <23>:<add> semantic_configuration_name="default", <del> semantic_configuration_name="default", <24>:<add> top=top, <del> top=top, <25>:<add> query_caption="extractive|highlight-false" if use_semantic_captions else None, <del> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26>:<add> vector=query_vector, <del> vector=query_vector, <27>:<add> top_k=50 if query_vector else None, <del> top_k=50 if query_vector else None, <28>:<add> vector_fields="embedding" if query_vector else None, <del> vector_fields="embedding" if query_vector else None) <29>:<add> ) <30>:<add> r = await self.search_client.search(
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> if not has_text: <15> query_text = "" <16> <17> if overrides.get("semantic_ranker") and has_text: <18> r = await self.search_client.search(query_text, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26> vector=query_vector, <27> top_k=50 if query_vector else None, <28> vector_fields="embedding" if query_vector else None) <29> else: <30> r = await self.search_client.search(query_text</s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: # offset: 1 filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) async for doc in r] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r] return results, "\n".join(results) ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ + system_chat_template = ( - system_chat_template = \ + "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " - "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ + + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " - "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ + + "Answer the following question using only the data provided in the sources below. " - "Answer the following question using only the data provided in the sources below. " + \ + + "For tabular information return it as an html table. Do not return markdown format. " - "For tabular information return it as an html table. Do not return markdown format. " + \ + + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " - "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ + + "If you cannot answer using the sources below, say you don't know. Use below example to answer" - "If you cannot answer using the sources below, say you don't know. Use below example to answer" + ) + # shots/sample conversation - #shots/sample conversation question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' 
Sources: info1.txt: deductibles depend on whether you are in-</s> ===========changed ref 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): # offset: 1 <s> Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new</s>
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<10>:<add> section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) <del> section["embedding"] = compute_embedding(content, embedding_deployment)
# module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): <0> file_id = filename_to_id(filename) <1> for i, (content, pagenum) in enumerate(split_text(page_map, filename)): <2> section = { <3> "id": f"{file_id}-page-{i}", <4> "content": content, <5> "category": args.category, <6> "sourcepage": blob_name_from_file_page(filename, pagenum), <7> "sourcefile": filename, <8> } <9> if use_vectors: <10> section["embedding"] = compute_embedding(content, embedding_deployment) <11> yield section <12>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() blob_name_from_file_page(filename, page=0) split_text(page_map, filename) filename_to_id(filename) compute_embedding(text, embedding_deployment, embedding_model) ===========changed ref 0=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
scripts.prepdocs/compute_embedding
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<1>:<add> embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} <add> return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] <del> return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"]
# module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): <0> refresh_openai_token() <1> return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] <2>
===========unchanged ref 0=========== at: openai.error RateLimitError(message=None, http_body=None, http_status=None, json_body=None, headers=None, code=None) at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() before_retry_sleep(retry_state) refresh_openai_token() at: tenacity retry(stop: "StopBaseT"=stop_never, wait: "WaitBaseT"=wait_none(), retry: "RetryBaseT"=retry_if_exception_type(), before: t.Callable[["RetryCallState"], None]=before_nothing, after: t.Callable[["RetryCallState"], None]=after_nothing, before_sleep: t.Optional[t.Callable[["RetryCallState"], None]]=None, reraise: bool=False, retry_error_cls: t.Type[RetryError]=RetryError, retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]]=None, *, sleep: t.Callable[[t.Union[int, float]], None]=sleep) -> t.Any at: tenacity.retry retry_if_exception_type(exception_types: typing.Union[ typing.Type[BaseException], typing.Tuple[typing.Type[BaseException], ...], ]=Exception) at: tenacity.stop stop_after_attempt(max_attempt_number: int) at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) ===========changed ref 0=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 1=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
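The change above only passes deployment_id when the OpenAI host is Azure and otherwise relies on the model name alone. A sketch of that keyword selection in the 0.x openai SDK style used by the script:

import openai

def embed_text(text: str, openai_host: str, deployment: str, model: str) -> list:
    # Azure endpoints address a named deployment; the public OpenAI API needs only the model name.
    embedding_args = {"deployment_id": deployment} if openai_host == "azure" else {}
    response = openai.Embedding.create(**embedding_args, model=model, input=text)
    return response["data"][0]["embedding"]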
scripts.prepdocs/compute_embedding_in_batch
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<1>:<add> embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} <add> emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) <del> emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts)
<s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): <0> refresh_openai_token() <1> emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) <2> return [data.embedding for data in emb_response.data] <3>
===========unchanged ref 0=========== at: openai.error RateLimitError(message=None, http_body=None, http_status=None, json_body=None, headers=None, code=None) at: tenacity retry(stop: "StopBaseT"=stop_never, wait: "WaitBaseT"=wait_none(), retry: "RetryBaseT"=retry_if_exception_type(), before: t.Callable[["RetryCallState"], None]=before_nothing, after: t.Callable[["RetryCallState"], None]=after_nothing, before_sleep: t.Optional[t.Callable[["RetryCallState"], None]]=None, reraise: bool=False, retry_error_cls: t.Type[RetryError]=RetryError, retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]]=None, *, sleep: t.Callable[[t.Union[int, float]], None]=sleep) -> t.Any at: tenacity.retry retry_if_exception_type(exception_types: typing.Union[ typing.Type[BaseException], typing.Tuple[typing.Type[BaseException], ...], ]=Exception) at: tenacity.stop stop_after_attempt(max_attempt_number: int) at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 2=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
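The batch variant relies on the limits declared in SUPPORTED_BATCH_AOAI_MODEL. Below is a rough sketch of how section texts could be grouped before calling compute_embedding_in_batch, honouring only max_batch_size (the real code also tracks token_limit, which this sketch ignores); the helper is illustrative, not repository code.

def batched(texts, max_batch_size=16):
    # Yield lists of at most max_batch_size texts.
    batch = []
    for text in texts:
        batch.append(text)
        if len(batch) == max_batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

# for batch in batched(section_texts):
#     embeddings = compute_embedding_in_batch(batch)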
scripts.prepdocs/read_files
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<23>:<add> embedding_model,
# module: scripts.prepdocs + def read_files( + path_pattern: str, + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: str = None, + embedding_model: str = None, + ): - def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None): <0> """ <1> Recursively read directory structure under `path_pattern` <2> and execute indexing for the individual files <3> """ <4> for filename in glob.glob(path_pattern): <5> if args.verbose: <6> print(f"Processing '{filename}'") <7> if args.remove: <8> remove_blobs(filename) <9> remove_from_index(filename) <10> else: <11> if os.path.isdir(filename): <12> read_files(filename + "/*", use_vectors, vectors_batch_support) <13> continue <14> try: <15> if not args.skipblobs: <16> upload_blobs(filename) <17> page_map = get_document_text(filename) <18> sections = create_sections( <19> os.path.basename(filename), <20> page_map, <21> use_vectors and not vectors_batch_support, <22> embedding_deployment, <23> ) <24> if use_vectors and vectors_batch_support: <25> sections = update_embeddings_in_batch(sections) <26> index_sections(os.path.basename(filename), sections) <27> except Exception as e: <28> print(f"\tGot an error while reading {filename} -> {e} --> skipping file") <29>
===========unchanged ref 0=========== at: glob glob(pathname: AnyStr, *, recursive: bool=...) -> List[AnyStr] at: openai api_key = os.environ.get("OPENAI_API_KEY") at: os.path isdir(s: AnyPath) -> bool at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() open_ai_token_cache = {} open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] = "azure_ad" open_ai_token_cache[CACHE_KEY_TOKEN_CRED] = azd_credential open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time() CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" remove_blobs(filename) remove_from_index(filename) at: time time() -> float ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 2=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 3=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
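An assumed call site for the widened signature (the __main__ wiring is not part of this record): the new embedding_model argument would be threaded through from the CLI flags that appear elsewhere in this diff.

# Hypothetical invocation; the flag names match the argparse options in the diff,
# the boolean values are placeholders.
read_files(
    args.files,
    use_vectors=True,
    vectors_batch_support=True,
    embedding_deployment=args.openaideployment,
    embedding_model=args.openaimodelname,
)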
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<2>:<add> self.openai_model = openai_model <3>:<add> self.embedding_model = embedding_model <5>:<add> self.openai_host = openai_host
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.embedding_deployment = embedding_deployment <3> self.sourcepage_field = sourcepage_field <4> self.content_field = content_field <5>
===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 2=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 3=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 4=========== # module: scripts.prepdocs + def read_files( + path_pattern: str, + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: str = None, + embedding_model: str = None, + ): - def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None): """ Recursively read directory structure under `path_pattern` and execute indexing for the individual files """ for filename in glob.glob(path_pattern): if args.verbose: print(f"Processing '{filename}'") if args.remove: remove_blobs(filename) remove_from_index(filename) else: if os.path.isdir(filename): read_files(filename + "/*", use_vectors, vectors_batch_support) continue try: if not args.skipblobs: upload_blobs(filename) page_map = get_document_text(filename) sections = create_sections( 
os.path.basename(filename), page_map, use_vectors and not vectors_batch_support, embedding_deployment, + embedding_model, ) if use_vectors and vectors_batch_support: sections = update_embeddings_in_batch(sections) index_sections(os.path.basename(filename), sections) except Exception as e: print(f"\tGot an error while reading {filename} -> {e} --> skipping file") ===========changed ref 5=========== # module: scripts.prepdocs if __name__ == "__main__": parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v", ) parser.add_argument("files", help="Files to be processed") parser.add_argument( "--category", help="Value for the category field in the search index for all sections indexed in this run" ) parser.add_argument( "--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage" ) parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument( "--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)", ) parser.add_argument( "--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)" ) parser.add_argument( "--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)", ) parser.add_argument( "--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)", ) parser.add_argument( "--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)", ) + parser.add_argument("--openaihost", help="Host of the API used to compute embeddings ('azure' or 'openai')") parser.add_argument("--openaiservice", help="</s>
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.retrieve
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<9>:<add> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <add> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) <del> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text)
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) <10> query_vector = embedding["data"][0]["embedding"] <11> else: <12> query_vector = None <13> <14> # Only keep the text query if the retrieval mode uses text, otherwise drop it <15> if not has_text: <16> query_text = "" <17> <18> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <19> if overrides.get("semantic_ranker") and has_text: <20> r = await self.search_client.search( <21> query_text, <22> filter=filter, <23> query_type=QueryType.SEMANTIC, <24> query_language="en-us", <25> query_speller="lexicon", <26> semantic_configuration_name="default", <27> top=top, <28> query_caption="extractive|highlight-false" if use_semantic_captions else None, <29> vector=query_vector, <30> top_k=50 if query_vector else None, <31> vector_fields="embedding" if query_vector else None, <32> ) <33> </s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] content = "\n".join(results) return results, content ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " "Answer the question using only the data provided in the information sources below. " "For tabular information return it as an html table. Do not return markdown format. " "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " 'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" ' 'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). ' 'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". ' "Never quote tool names as sources." "If you cannot answer using the sources below, say that you don't know. " "\n\nYou can access to the following tools:" ) template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." 
at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.search_client = search_client at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) ===========unchanged ref 1=========== at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 2=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 3=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 4=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = 
None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section
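The retrieval_mode handling at the top of retrieve can be summarised in a few lines; this is a sketch restating the logic above, not additional repository code.

def retrieval_flags(mode):
    # "text", "vectors", "hybrid" (or unset, which defaults to hybrid).
    has_text = mode in ("text", "hybrid", None)
    has_vector = mode in ("vectors", "hybrid", None)
    return has_text, has_vector

assert retrieval_flags(None) == (True, True)
assert retrieval_flags("text") == (True, False)
assert retrieval_flags("vectors") == (False, True)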
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<27>:<add> if self.openai_type == "azure": <add> llm = AzureOpenAI( <del> llm = AzureOpenAI( <28>:<add> deployment_name=self.openai_deployment, <del> deployment_name=self.openai_deployment, <29>:<add> temperature=overrides.get("temperature", 0.3), <del> temperature=overrides.get("temperature") or 0.3, <30>:<add> openai_api_key=openai.api_key, <del> openai_api_key=openai.api_key, <31>:<add> ) <del> ) <32>:<add> else: <add> llm = OpenAI( <add> model_name=self.openai_model, <add> temperature=overrides.get("temperature", 0.3), <add> openai_api_key=openai.api_key, <add> ) <add>
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> retrieve_results = None <1> <2> async def retrieve_and_store(q: str) -> Any: <3> nonlocal retrieve_results <4> retrieve_results, content = await self.retrieve(q, overrides) <5> return content <6> <7> # Use to capture thought process during iterations <8> cb_handler = HtmlCallbackHandler() <9> cb_manager = CallbackManager(handlers=[cb_handler]) <10> <11> acs_tool = Tool( <12> name="CognitiveSearch", <13> func=lambda _: "Not implemented", <14> coroutine=retrieve_and_store, <15> description=self.CognitiveSearchToolDescription, <16> callbacks=cb_manager, <17> ) <18> employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) <19> tools = [acs_tool, employee_tool] <20> <21> prompt = ZeroShotAgent.create_prompt( <22> tools=tools, <23> prefix=overrides.get("prompt_template_prefix") or self.template_prefix, <24> suffix=overrides.get("prompt_template_suffix") or self.template_suffix, <25> input_variables=["input", "agent_scratchpad"], <26> ) <27> llm = AzureOpenAI( <28> deployment_name=self.openai_deployment, <29> temperature=overrides.get("temperature") or 0.3, <30> openai_api_key=openai.api_key, <31> ) <32> chain = LLMChain(llm=llm, prompt=prompt) <33> agent_exec = AgentExecutor.from_agent_and_tools( <34> agent=ZeroShotAgent(llm_chain=chain), tools=tools, verbose=True, callback_manager=cb_manager <35> ) <36> result = await agent_exec.arun(q) <37> <38> # Remove references to tool</s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread EmployeeInfoTool(employee_name: str, callbacks: Callbacks=None) at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " "Answer the question using only the data provided in the information sources below. " "For tabular information return it as an html table. Do not return markdown format. " "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " 'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" ' 'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). ' 'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". ' "Never quote tool names as sources." "If you cannot answer using the sources below, say that you don't know. " "\n\nYou can access to the following tools:" ) template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." 
retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any retrieve(query_text: str, overrides: dict[str, Any]) -> Any at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.openai_deployment = openai_deployment ===========unchanged ref 1=========== self.sourcepage_field = sourcepage_field self.content_field = content_field at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.retrieve r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any] at: openai api_key = os.environ.get("OPENAI_API_KEY") at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} + embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) - embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = "" # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, </s> ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s>_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else 
None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] content = "\n".join(results) return results, content
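One subtle part of this diff is the switch from overrides.get("temperature") or 0.3 to overrides.get("temperature", 0.3): with the or-form an explicit temperature of 0 is falsy and silently becomes 0.3, while the dict default preserves it. A two-line check:

overrides = {"temperature": 0.0}
assert (overrides.get("temperature") or 0.3) == 0.3   # old form drops the explicit 0
assert overrides.get("temperature", 0.3) == 0.0       # new form keeps it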
tests.test_prepdocs/test_compute_embedding_success
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<22>:<add> assert compute_embedding("foo", "ada", "text-ada-003") == [ <del> assert compute_embedding("foo", "ada") == [
# module: tests.test_prepdocs def test_compute_embedding_success(monkeypatch, capsys): <0> monkeypatch.setattr(args, "verbose", True) <1> <2> def mock_create(*args, **kwargs): <3> # From https://platform.openai.com/docs/api-reference/embeddings/create <4> return { <5> "object": "list", <6> "data": [ <7> { <8> "object": "embedding", <9> "embedding": [ <10> 0.0023064255, <11> -0.009327292, <12> -0.0028842222, <13> ], <14> "index": 0, <15> } <16> ], <17> "model": "text-embedding-ada-002", <18> "usage": {"prompt_tokens": 8, "total_tokens": 8}, <19> } <20> <21> monkeypatch.setattr(openai.Embedding, "create", mock_create) <22> assert compute_embedding("foo", "ada") == [ <23> 0.0023064255, <24> -0.009327292, <25> -0.0028842222, <26> ] <27>
===========unchanged ref 0=========== at: _pytest.capture capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None] at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() compute_embedding(text, embedding_deployment, embedding_model) ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 2=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 3=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 4=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in 
enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 5=========== # module: scripts.prepdocs + def read_files( + path_pattern: str, + use_vectors: bool, + vectors_batch_support: bool, + embedding_deployment: str = None, + embedding_model: str = None, + ): - def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool, embedding_deployment: str = None): """ Recursively read directory structure under `path_pattern` and execute indexing for the individual files """ for filename in glob.glob(path_pattern): if args.verbose: print(f"Processing '{filename}'") if args.remove: remove_blobs(filename) remove_from_index(filename) else: if os.path.isdir(filename): read_files(filename + "/*", use_vectors, vectors_batch_support) continue try: if not args.skipblobs: upload_blobs(filename) page_map = get_document_text(filename) sections = create_sections( os.path.basename(filename), page_map, use_vectors and not vectors_batch_support, embedding_deployment, + embedding_model, ) if use_vectors and vectors_batch_support: sections = update_embeddings_in_batch(sections) index_sections(os.path.basename(filename), sections) except Exception as e: print(f"\tGot an error while reading {filename} -> {e} --> skipping file")
tests.test_prepdocs/test_compute_embedding_ratelimiterror
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<8>:<add> compute_embedding("foo", "ada", "text-ada-003") <del> compute_embedding("foo", "ada")
# module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): <0> monkeypatch.setattr(args, "verbose", True) <1> <2> def mock_create(*args, **kwargs): <3> raise openai.error.RateLimitError <4> <5> monkeypatch.setattr(openai.Embedding, "create", mock_create) <6> monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) <7> with pytest.raises(tenacity.RetryError): <8> compute_embedding("foo", "ada") <9> captured = capsys.readouterr() <10> assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 <11>
===========unchanged ref 0=========== at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.error RateLimitError(message=None, http_body=None, http_status=None, json_body=None, headers=None, code=None) at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() compute_embedding(text, embedding_deployment, embedding_model) at: tenacity RetryError(last_attempt: "Future") ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: tests.test_prepdocs def test_compute_embedding_success(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return { "object": "list", "data": [ { "object": "embedding", "embedding": [ 0.0023064255, -0.009327292, -0.0028842222, ], "index": 0, } ], "model": "text-embedding-ada-002", "usage": {"prompt_tokens": 8, "total_tokens": 8}, } monkeypatch.setattr(openai.Embedding, "create", mock_create) + assert compute_embedding("foo", "ada", "text-ada-003") == [ - assert compute_embedding("foo", "ada") == [ 0.0023064255, -0.009327292, -0.0028842222, ] ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 3=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, 
input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 4=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 5=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section
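Why the assertion expects exactly 14 occurrences: stop_after_attempt(15) allows fifteen calls, and before_sleep fires before each of the fourteen sleeps between consecutive attempts. A self-contained tenacity sketch (not repository code) that reproduces the count:

import tenacity

sleeps = []

@tenacity.retry(
    stop=tenacity.stop_after_attempt(15),
    wait=tenacity.wait_fixed(0),
    before_sleep=lambda retry_state: sleeps.append(retry_state.attempt_number),
)
def always_rate_limited():
    raise RuntimeError("simulated rate limit")

try:
    always_rate_limited()
except tenacity.RetryError:
    pass
assert len(sleeps) == 14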
tests.test_prepdocs/test_compute_embedding_autherror
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<8>:<add> compute_embedding("foo", "ada", "text-ada-003") <del> compute_embedding("foo", "ada")
# module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): <0> monkeypatch.setattr(args, "verbose", True) <1> <2> def mock_create(*args, **kwargs): <3> raise openai.error.AuthenticationError <4> <5> monkeypatch.setattr(openai.Embedding, "create", mock_create) <6> monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) <7> with pytest.raises(openai.error.AuthenticationError): <8> compute_embedding("foo", "ada") <9>
===========unchanged ref 0=========== at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.error AuthenticationError(message=None, http_body=None, http_status=None, json_body=None, headers=None, code=None) at: scripts.prepdocs args = argparse.Namespace(verbose=False, openaihost="azure") args = parser.parse_args() compute_embedding(text, embedding_deployment, embedding_model) ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.RateLimitError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(tenacity.RetryError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 2=========== # module: tests.test_prepdocs def test_compute_embedding_success(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return { "object": "list", "data": [ { "object": "embedding", "embedding": [ 0.0023064255, -0.009327292, -0.0028842222, ], "index": 0, } ], "model": "text-embedding-ada-002", "usage": {"prompt_tokens": 8, "total_tokens": 8}, } monkeypatch.setattr(openai.Embedding, "create", mock_create) + assert compute_embedding("foo", "ada", "text-ada-003") == [ - assert compute_embedding("foo", "ada") == [ 0.0023064255, -0.009327292, -0.0028842222, ] ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 4=========== 
<s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 5=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 6=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section
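This test also depends on the retry filter added in the same commit: retry_if_exception_type(openai.error.RateLimitError) means anything other than a rate-limit error is raised on the first attempt rather than retried, so pytest.raises sees the AuthenticationError immediately. A small tenacity sketch (not repository code) of that behaviour, using placeholder exception types:

import tenacity

attempts = []

@tenacity.retry(
    retry=tenacity.retry_if_exception_type(ValueError),
    stop=tenacity.stop_after_attempt(3),
    wait=tenacity.wait_fixed(0),
)
def fails_differently():
    attempts.append(1)
    raise TypeError("not a retryable error")

try:
    fails_differently()
except TypeError:
    pass
assert len(attempts) == 1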
tests.conftest/mock_openai_embedding
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<1>:<add> if openai.api_type == "openai": <add> assert kwargs.get("deployment_id") is None <add> else: <add> assert kwargs.get("deployment_id") is not None
# module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): <0> async def mock_acreate(*args, **kwargs): <1> return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} <2> <3> monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) <4>
===========changed ref 0=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 2=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 3=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 4=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 5=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} 
===========changed ref 6=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 7=========== # module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.RateLimitError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(tenacity.RetryError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14 ===========changed ref 8=========== # module: tests.test_prepdocs def test_compute_embedding_success(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): # From https://platform.openai.com/docs/api-reference/embeddings/create return { "object": "list", "data": [ { "object": "embedding", "embedding": [ 0.0023064255, -0.009327292, -0.0028842222, ], "index": 0, } ], "model": "text-embedding-ada-002", "usage": {"prompt_tokens": 8, "total_tokens": 8}, } monkeypatch.setattr(openai.Embedding, "create", mock_create) + assert compute_embedding("foo", "ada", "text-ada-003") == [ - assert compute_embedding("foo", "ada") == [ 0.0023064255, -0.009327292, -0.0028842222, ]
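Hypothetical wiring for the envs list shown above (the real fixture set-up is not part of this record): parametrising a fixture over both entries would let the mocks assert the presence or absence of deployment_id for the Azure and openai.com configurations respectively.

import pytest

@pytest.fixture(params=envs)
def mock_env(monkeypatch, request):
    # Apply one host configuration per test run.
    for key, value in request.param.items():
        monkeypatch.setenv(key, value)
    return request.param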
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<16>:<add> if openai.api_type == "openai": <add> assert kwargs.get("deployment_id") is None <add> else: <add> assert kwargs.get("deployment_id") is not None
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer): <2> self.num = 1 <3> self.answer = answer <4> <5> def __aiter__(self): <6> return self <7> <8> async def __anext__(self): <9> if self.num == 1: <10> self.num = 0 <11> return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) <12> else: <13> raise StopAsyncIteration <14> <15> async def mock_acreate(*args, **kwargs): <16> messages = kwargs["messages"] <17> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <18> answer = "capital of France" <19> else: <20> answer = "The capital of France is Paris." <21> if "stream" in kwargs and kwargs["stream"] is True: <22> return AsyncChatCompletionIterator(answer) <23> else: <24> return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) <25> <26> monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) <27>
===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 1=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 3=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 4=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 5=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 6=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") 
- args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 7=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 8=========== # module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.RateLimitError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(tenacity.RetryError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14
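For reference, a small illustrative test (hypothetical, not part of the change set) showing how openai.api_type drives the mock's deployment_id assertions above; it assumes pytest-asyncio is installed and uses the mock_openai_chatcompletion fixture from this module.

import openai
import pytest

@pytest.mark.asyncio
async def test_mock_respects_host_conventions(monkeypatch, mock_openai_chatcompletion):
    # Azure-style call: the mocked acreate expects a deployment_id.
    monkeypatch.setattr(openai, "api_type", "azure")
    azure_reply = await openai.ChatCompletion.acreate(
        deployment_id="test-chatgpt",
        model="gpt-35-turbo",
        messages=[{"role": "user", "content": "hello"}],
    )
    assert azure_reply.choices[0].message.content

    # openai.com-style call: the mocked acreate expects deployment_id to be absent.
    monkeypatch.setattr(openai, "api_type", "openai")
    openai_reply = await openai.ChatCompletion.acreate(
        model="gpt-35-turbo",
        messages=[{"role": "user", "content": "hello"}],
    )
    assert openai_reply.choices[0].message.content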
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<4>:<del> monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service") <5>:<del> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt") <7>:<add> for key, value in request.param.items(): <add> monkeypatch.setenv(key, value) <del> monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada")
# module: tests.conftest + @pytest_asyncio.fixture(params=envs) - @pytest_asyncio.fixture + async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): - async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service") <5> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt") <6> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <7> monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada") <8> <9> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <10> mock_default_azure_credential.return_value = MockAzureCredential() <11> quart_app = app.create_app() <12> <13> async with quart_app.test_app() as test_app: <14> quart_app.config.update({"TESTING": True}) <15> <16> yield test_app.test_client() <17>
===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 1=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): class AsyncChatCompletionIterator: def __init__(self, answer): self.num = 1 self.answer = answer def __aiter__(self): return self async def __anext__(self): if self.num == 1: self.num = 0 return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) else: raise StopAsyncIteration async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None messages = kwargs["messages"] if messages[-1]["content"] == "Generate search query for: What is the capital of France?": answer = "capital of France" else: answer = "The capital of France is Paris." if "stream" in kwargs and kwargs["stream"] is True: return AsyncChatCompletionIterator(answer) else: return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 4=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 5=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), 
stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 6=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 7=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 8=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section
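The params=envs fixture above runs every client-based test once per host configuration. A generic sketch of that pytest pattern, with hypothetical fixture and test names, for readers unfamiliar with parametrized fixtures:

import os
import pytest

HOST_CONFIGS = [
    {"OPENAI_HOST": "openai", "OPENAI_API_KEY": "secretkey"},
    {"OPENAI_HOST": "azure", "AZURE_OPENAI_SERVICE": "test-openai-service"},
]

@pytest.fixture(params=HOST_CONFIGS)
def host_env(monkeypatch, request):
    # request.param is the dict for the current parametrization; monkeypatch undoes it afterwards.
    for key, value in request.param.items():
        monkeypatch.setenv(key, value)
    return request.param

def test_host_is_configured(host_env):
    # Collected twice: once per entry in HOST_CONFIGS.
    assert os.environ["OPENAI_HOST"] in ("openai", "azure")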
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<1>:<add> self.openai_host = openai_host <4>:<add> self.embedding_model = embedding_model
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def __init__( self, search_client: SearchClient, + openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): <0> self.search_client = search_client <1> self.chatgpt_deployment = chatgpt_deployment <2> self.chatgpt_model = chatgpt_model <3> self.embedding_deployment = embedding_deployment <4> self.sourcepage_field = sourcepage_field <5> self.content_field = content_field <6> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <7>
===========unchanged ref 0=========== at: core.modelhelper get_token_limit(model_id: str) -> int ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 3=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 4=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 5=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 
6=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 7=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 8=========== # module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.RateLimitError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(tenacity.RetryError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14
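Purely illustrative usage of the widened constructor (argument values are placeholders and search_client is assumed to be an existing SearchClient instance): deployments only matter when openai_host is "azure", while model names are always required.

approach_azure = ChatReadRetrieveReadApproach(
    search_client,
    openai_host="azure",
    chatgpt_deployment="chat",
    chatgpt_model="gpt-35-turbo",
    embedding_deployment="embedding",
    embedding_model="text-embedding-ada-002",
    sourcepage_field="sourcepage",
    content_field="content",
)

approach_openai = ChatReadRetrieveReadApproach(
    search_client,
    openai_host="openai",
    chatgpt_deployment=None,      # unused outside Azure
    chatgpt_model="gpt-35-turbo",
    embedding_deployment=None,    # unused outside Azure
    embedding_model="text-embedding-ada-002",
    sourcepage_field="sourcepage",
    content_field="content",
)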
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_until_final_call
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<19>:<add> chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} <20>:<add> **chatgpt_args, <del> deployment_id=self.chatgpt_deployment,
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> user_q = "Generate search query for: " + history[-1]["user"] <8> <9> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <10> messages = self.get_messages_from_history( <11> self.query_prompt_template, <12> self.chatgpt_model, <13> history, <14> user_q, <15> self.query_prompt_few_shots, <16> self.chatgpt_token_limit - len(user_q), <17> ) <18> <19> chat_completion = await openai.ChatCompletion.acreate( <20> deployment_id=self.chatgpt_deployment, <21> model=self.chatgpt_model, <22> messages=messages, <23> temperature=0.0, <24> max_tokens=32, <25> n=1, <26> ) <27> <28> query_text = chat_completion.choices[0].message.content <29> if query_text.strip() == "0": <30> query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query <31> <32> # STEP 2: Retrieve relevant documents from the search index with the GPT</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 1 # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r </s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 2 <s>lines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) follow_up_questions_prompt = ( self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" ) # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: system_message = self.system_message_chat_conversation.format( injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt ) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format( injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt ) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history( system_message, self.chatgpt_model, history, # Model does not handle lengthy system messages well. # Moved sources to latest user conversation to solve follow up questions prompt. 
history[-1][</s> ===========below chunk 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def run_until_final_call( self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 3 <s>"] + "\n\nSources:\n" + content, max_tokens=self.chatgpt_token_limit, ) msg_to_display = "\n\n".join([str(message) for message in messages]) extra_info = { "data_points": results, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace("\n", "<br>"), } chat_coroutine = openai.ChatCompletion.acreate( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stream=should_stream, ) return (extra_info, chat_coroutine) ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<1>:<add> self.openai_host = openai_host <add> self.chatgpt_deployment = chatgpt_deployment <del> self.openai_deployment = openai_deployment <3>:<add> self.embedding_model = embedding_model
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, + chatgpt_deployment: str, - openai_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.chatgpt_model = chatgpt_model <3> self.embedding_deployment = embedding_deployment <4> self.sourcepage_field = sourcepage_field <5> self.content_field = content_field <6>
===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 3=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def __init__( self, search_client: SearchClient, + openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client + self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 5=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", 
lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 6=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 7=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 8=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None): - def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None): file_id = filename_to_id(filename) for i, (content, pagenum) in enumerate(split_text(page_map, filename)): section = { "id": f"{file_id}-page-{i}", "content": content, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename, } if use_vectors: + section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model) - section["embedding"] = compute_embedding(content, embedding_deployment) yield section ===========changed ref 9=========== # module: tests.test_prepdocs def test_compute_embedding_ratelimiterror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.RateLimitError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(tenacity.RetryError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") captured = capsys.readouterr() assert captured.out.count("Rate limited on the OpenAI embeddings API") == 14
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<9>:<add> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <add> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <del> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=q)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=q) <10> query_vector = embedding["data"][0]["embedding"] <11> else: <12> query_vector = None <13> <14> # Only keep the text query if the retrieval mode uses text, otherwise drop it <15> query_text = q if has_text else "" <16> <17> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <18> if overrides.get("semantic_ranker") and has_text: <19> r = await self.search_client.search( <20> query_text, <21> filter=filter, <22> query_type=QueryType.SEMANTIC, <23> query_language="en-us", <24> query_speller="lexicon", <25> semantic_configuration_name="default", <26> top=top, <27> query_caption="extractive|highlight-false" if use_semantic_captions else None, <28> vector=query_vector, <29> top_k=50 if query_vector else None, <30> vector_fields="embedding" if query_vector else None, <31> ) <32> else</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chat_completion = await openai.ChatCompletion.acreate( deployment_id=self.openai_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) return { "data_points": results, "answer": chat_completion.choices[0].message.content, </s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 2 <s> return { "data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." 
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment ===========unchanged ref 1=========== self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": system_content}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 2=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
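As a side note, the retrieval_mode override in run() boils down to two booleans; a tiny hypothetical helper (not in the codebase) makes the mapping explicit:

from typing import Optional

def retrieval_flags(retrieval_mode: Optional[str]):
    # None and "hybrid" enable both text and vector retrieval.
    has_text = retrieval_mode in ("text", "hybrid", None)
    has_vector = retrieval_mode in ("vectors", "hybrid", None)
    return has_text, has_vector

assert retrieval_flags(None) == (True, True)
assert retrieval_flags("text") == (True, False)
assert retrieval_flags("vectors") == (False, True)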
app.backend.app/ensure_openai_token
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<0>:<add> if openai.api_type != "azure_ad": <add> return
# module: app.backend.app @bp.before_request async def ensure_openai_token(): <0> openai_token = current_app.config[CONFIG_OPENAI_TOKEN] <1> if openai_token.expires_on < time.time() + 60: <2> openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token( <3> "https://cognitiveservices.azure.com/.default" <4> ) <5> current_app.config[CONFIG_OPENAI_TOKEN] = openai_token <6> openai.api_key = openai_token.token <7>
===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" bp = Blueprint("routes", __name__, static_folder="static") at: openai api_key = os.environ.get("OPENAI_API_KEY") at: time time() -> float ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, + chatgpt_deployment: str, - openai_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client + self.openai_host = openai_host + self.chatgpt_deployment = chatgpt_deployment - self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 4=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 5=========== # module: 
app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def __init__( self, search_client: SearchClient, + openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client + self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 6=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 7=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 8=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
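A standalone sketch of the refresh guard shown above, assuming the CONFIG_* keys from this module and a Quart-style config dict; it exists only to spell the logic out, not to replace the handler.

import time
import openai

async def refresh_openai_token_if_needed(config):
    # Nothing to refresh unless the app authenticated to Azure OpenAI via Azure AD.
    if openai.api_type != "azure_ad":
        return
    token = config["openai_token"]            # CONFIG_OPENAI_TOKEN
    if token.expires_on < time.time() + 60:   # refresh one minute before expiry
        token = await config["azure_credential"].get_token(  # CONFIG_CREDENTIAL
            "https://cognitiveservices.azure.com/.default"
        )
        config["openai_token"] = token
        openai.api_key = token.token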
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<5>:<add> # Shared by all OpenAI deployments <add> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") <del> AZURE_OPENAI_SERVICE = os.environ["AZURE_OPENAI_SERVICE"] <6>:<del> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ["AZURE_OPENAI_CHATGPT_DEPLOYMENT"] <7>:<add> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <del> AZURE_OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8>:<add> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") <add> # Used with Azure OpenAI deployments <add> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <add> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <add> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <del> AZURE_OPENAI_EMB_DEPLOYMENT = os.environ["AZURE_OPENAI_EMB_DEPLOYMENT"] <9>:<add> # Used only with non-Azure OpenAI deployments <add> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <5> AZURE_OPENAI_SERVICE = os.environ["AZURE_OPENAI_SERVICE"] <6> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ["AZURE_OPENAI_CHATGPT_DEPLOYMENT"] <7> AZURE_OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8> AZURE_OPENAI_EMB_DEPLOYMENT = os.environ["AZURE_OPENAI_EMB_DEPLOYMENT"] <9> <10> KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") <11> KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") <12> <13> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <14> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the <15> # keys for each service <16> # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) <17> azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) <18> <19> # Set up clients for Cognitive Search and Storage <20> search_client = SearchClient( <21> endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", <22> index_name=AZURE</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-05-15" openai.api_type = "azure_ad" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rrr": ReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, </s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s>PLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rda": ReadDecomposeAsk( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" bp = Blueprint("routes", __name__, static_folder="static") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() ===========unchanged ref 1=========== getenv(key: str, default: _T) -> 
Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.app @bp.before_request async def ensure_openai_token(): + if openai.api_type != "azure_ad": + return openai_token = current_app.config[CONFIG_OPENAI_TOKEN] if openai_token.expires_on < time.time() + 60: openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token( "https://cognitiveservices.azure.com/.default" ) current_app.config[CONFIG_OPENAI_TOKEN] = openai_token openai.api_key = openai_token.token ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 2=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"]
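The ground-truth hint above is truncated mid-assignment, so the exact continuation is left as-is; as a rough illustration only, host selection for the openai 0.x SDK generally takes this shape (environment variable names follow the snippet above, the function itself is hypothetical):

import os
from typing import Optional

import openai

def configure_openai_sdk(azure_ad_token: Optional[str] = None):
    if os.getenv("OPENAI_HOST", "azure") == "azure":
        openai.api_base = f"https://{os.environ['AZURE_OPENAI_SERVICE']}.openai.azure.com"
        openai.api_version = "2023-05-15"
        openai.api_type = "azure_ad"
        openai.api_key = azure_ad_token   # kept fresh later by the before_request hook
    else:
        openai.api_type = "openai"
        openai.api_key = os.environ["OPENAI_API_KEY"]
        openai.organization = os.getenv("OPENAI_ORGANIZATION")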
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.__init__
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<2>:<add> self.openai_model = openai_model
<3>:<add> self.embedding_model = embedding_model
<5>:<add> self.openai_host = openai_host
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.embedding_deployment = embedding_deployment <3> self.sourcepage_field = sourcepage_field <4> self.content_field = content_field <5>
===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, input=text)["data"][0]["embedding"] ===========changed ref 2=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, + chatgpt_deployment: str, - openai_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client + self.openai_host = openai_host + self.chatgpt_deployment = chatgpt_deployment - self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 4=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 5=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: def __init__( self, search_client: SearchClient, + openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: 
str, ): self.search_client = search_client + self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 6=========== # module: tests.test_prepdocs def test_compute_embedding_autherror(monkeypatch, capsys): monkeypatch.setattr(args, "verbose", True) def mock_create(*args, **kwargs): raise openai.error.AuthenticationError monkeypatch.setattr(openai.Embedding, "create", mock_create) monkeypatch.setattr(tenacity.nap.time, "sleep", lambda x: None) with pytest.raises(openai.error.AuthenticationError): + compute_embedding("foo", "ada", "text-ada-003") - compute_embedding("foo", "ada") ===========changed ref 7=========== # module: app.backend.app @bp.before_request async def ensure_openai_token(): + if openai.api_type != "azure_ad": + return openai_token = current_app.config[CONFIG_OPENAI_TOKEN] if openai_token.expires_on < time.time() + 60: openai_token = await current_app.config[CONFIG_CREDENTIAL].get_token( "https://cognitiveservices.azure.com/.default" ) current_app.config[CONFIG_OPENAI_TOKEN] = openai_token openai.api_key = openai_token.token ===========changed ref 8=========== # module: tests.conftest MockToken = namedtuple("MockToken", ["token", "expires_on"]) + envs = [ + { + "OPENAI_HOST": "openai", + "OPENAI_API_KEY": "secretkey", + "OPENAI_ORGANIZATION": "organization", + }, + { + "OPENAI_HOST": "azure", + "AZURE_OPENAI_SERVICE": "test-openai-service", + "AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt", + "AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada", + }, + ] + ===========changed ref 9=========== # module: scripts.prepdocs + args = argparse.Namespace(verbose=False, openaihost="azure") - args = argparse.Namespace(verbose=False) MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.search
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<9>:<add> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
<add> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
<del> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text)
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) <10> query_vector = embedding["data"][0]["embedding"] <11> else: <12> query_vector = None <13> <14> # Only keep the text query if the retrieval mode uses text, otherwise drop it <15> if not has_text: <16> query_text = "" <17> <18> if overrides.get("semantic_ranker") and has_text: <19> r = await self.search_client.search( <20> query_text, <21> filter=filter, <22> query_type=QueryType.SEMANTIC, <23> query_language="en-us", <24> query_speller="lexicon", <25> semantic_configuration_name="default", <26> top=top, <27> query_caption="extractive|highlight-false" if use_semantic_captions else None, <28> vector=query_vector, <29> top_k=50 if query_vector else None, <30> vector_fields="embedding" if query_vector else None, <31> ) <32> else: <33> r = await self.search_client.search( </s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: # offset: 1 filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r] return results, "\n".join(results) ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, openai_deployment: str, + openai_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client self.openai_deployment = openai_deployment + self.openai_model = openai_model self.embedding_deployment = embedding_deployment + self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field + self.openai_host = openai_host ===========changed ref 2=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) + def compute_embedding(text, embedding_deployment, embedding_model): - def compute_embedding(text, embedding_deployment): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} + return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] - return openai.Embedding.create(engine=embedding_deployment, 
input=text)["data"][0]["embedding"] ===========changed ref 3=========== # module: tests.conftest @pytest.fixture def mock_openai_embedding(monkeypatch): async def mock_acreate(*args, **kwargs): + if openai.api_type == "openai": + assert kwargs.get("deployment_id") is None + else: + assert kwargs.get("deployment_id") is not None return {"data": [{"embedding": [0.1, 0.2, 0.3]}]} monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate) ===========changed ref 4=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): def __init__( self, search_client: SearchClient, + openai_host: str, + chatgpt_deployment: str, - openai_deployment: str, chatgpt_model: str, embedding_deployment: str, + embedding_model: str, sourcepage_field: str, content_field: str, ): self.search_client = search_client + self.openai_host = openai_host + self.chatgpt_deployment = chatgpt_deployment - self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 5=========== <s>retry( + retry=retry_if_exception_type(openai.error.RateLimitError), + wait=wait_random_exponential(min=15, max=60), + stop=stop_after_attempt(15), + before_sleep=before_retry_sleep, + ) - @retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} + emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) - emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts) return [data.embedding for data in emb_response.data]
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.run
Modified
Azure-Samples~azure-search-openai-demo
62e22486632194ca387ba3c7187c4db4db8a5425
Adding support for non-Azure openai instances (#507)
<11>:<add> if self.openai_host == "azure":
<add> llm = AzureOpenAI(
<del> llm = AzureOpenAI(
<12>:<add> deployment_name=self.openai_deployment,
<del> deployment_name=self.openai_deployment,
<13>:<add> temperature=overrides.get("temperature", 0.3),
<del> temperature=overrides.get("temperature") or 0.3,
<14>:<add> openai_api_key=openai.api_key,
<del> openai_api_key=openai.api_key,
<15>:<add> )
<del> )
<16>:<add> else:
<add> llm = OpenAI(
<add> model_name=self.openai_model,
<add> temperature=overrides.get("temperature", 0.3),
<add> openai_api_key=openai.api_key,
<add> )
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> search_results = None <1> <2> async def search_and_store(q: str) -> Any: <3> nonlocal search_results <4> search_results, content = await self.search(q, overrides) <5> return content <6> <7> # Use to capture thought process during iterations <8> cb_handler = HtmlCallbackHandler() <9> cb_manager = CallbackManager(handlers=[cb_handler]) <10> <11> llm = AzureOpenAI( <12> deployment_name=self.openai_deployment, <13> temperature=overrides.get("temperature") or 0.3, <14> openai_api_key=openai.api_key, <15> ) <16> tools = [ <17> Tool( <18> name="Search", <19> func=lambda _: "Not implemented", <20> coroutine=search_and_store, <21> description="useful for when you need to ask with search", <22> callbacks=cb_manager, <23> ), <24> Tool( <25> name="Lookup", <26> func=lambda _: "Not implemented", <27> coroutine=self.lookup, <28> description="useful for when you need to ask with lookup", <29> callbacks=cb_manager, <30> ), <31> ] <32> <33> prompt_prefix = overrides.get("prompt_template") <34> prompt = PromptTemplate.from_examples( <35> EXAMPLES, <36> SUFFIX, <37> ["input", "agent_scratchpad"], <38> prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX, <39> ) <40> <41> class ReAct(ReActDocstoreAgent): <42> @classmethod <43> def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: <44> return prompt <45> <46> agent = ReAct.from_llm_and_tools(llm, tools) <47> chain =</s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 result = await chain.arun(q) # Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links, match them with a regex to avoid # generalizing too much and disrupt HTML snippets if present result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result) return {"data_points": search_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask EXAMPLES = [ """Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? Thought: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area. Action: Search[Colorado orogeny] Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas. Thought: It does not mention the eastern sector. So I need to look up eastern sector. Action: Lookup[eastern sector] Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny. Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range. Action: Search[High Plains] Observation: <some_file.pdf> High Plains refers to one of two distinct land regions Thought: I need to instead search High Plains (United States). Action: Search[High Plains (United States)] Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m). Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. Action: Finish[1,800 to 7,000 ft <filea.pdf>]""", """Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who</s> ===========unchanged ref 1=========== SUFFIX = """\nQuestion: {input} {agent_scratchpad}""" PREFIX = ( "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " "Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers." "All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. " ) at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str] search(query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str] lookup(q: str) -> Optional[str] at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.openai_deployment = openai_deployment at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any] at: langchainadapters.HtmlCallbackHandler html: str = "" get_and_reset_log() -> str at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========unchanged ref 2=========== at: re sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) 
-> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} + embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) - embedding = await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = "" if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_</s>
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
7b7e6cedad841c5816abf7900cb42a0bdf471ea5
Stabilize search query generation (#652)
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <5> # Shared by all OpenAI deployments <6> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") <7> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") <9> # Used with Azure OpenAI deployments <10> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <11> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <12> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <13> # Used only with non-Azure OpenAI deployments <14> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") <15> OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION") <16> <17> KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") <18> KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") <19> <20> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <21> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) # Set up clients for Cognitive Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api_type = "azure_ad" openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-05-15" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token else: openai.api_type = "openai" openai.api_key = OPENAI_API_KEY openai.organization = OPENAI_ORGANIZATION current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s> blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rrr": ReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rda": ReadDecomposeAsk( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_</s> ===========below chunk 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 3 <s> AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" bp = Blueprint("routes", __name__, static_folder="static") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, 
sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_host: str, openai_deployment: str, openai_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, openai_deployment: str, openai_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") organization = os.environ.get("OPENAI_ORGANIZATION") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") ===========unchanged ref 1=========== api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
7b7e6cedad841c5816abf7900cb42a0bdf471ea5
Stabilize search query generation (#652)
<1>:<add> chat_resp = await chat_coroutine
<add> chat_content = chat_resp.choices[0].message.content
<del> chat_content = (await chat_coroutine).choices[0].message.content
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach:
    def run_without_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> dict[str, Any]:
<0> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False)
<1> chat_content = (await chat_coroutine).choices[0].message.content
<2> extra_info["answer"] = chat_content
<3> return extra_info
<4>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_token_limit = get_token_limit(chatgpt_model) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run_until_final_call content = "\n".join(results) messages = self.get_messages_from_history( system_message, self.chatgpt_model, history, # Model does not handle lengthy system messages well. # Moved sources to latest user conversation to solve follow up questions prompt. history[-1]["user"] + "\n\nSources:\n" + content, max_tokens=self.chatgpt_token_limit, ) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" + + NO_RESPONSE = "0" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach: # offset: 1 <s> asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. + You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] ===========changed ref 2=========== + # module: tests.test_chatapproach + + ===========changed ref 3=========== + # module: tests.test_chatapproach + def test_get_search_query_returns_default(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant"},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' + default_query = "hello" + query = chat_approach.get_search_query(json.loads(payload), default_query) + + assert query == default_query + ===========changed ref 4=========== + # module: tests.test_chatapproach + def test_get_search_query(): + chat_approach = ChatReadRetrieveReadApproach(None, "", "gpt-35-turbo", "gpt-35-turbo", "", "", "", "") + + payload = '{"id":"chatcmpl-81JkxYqYppUkPtOAia40gki2vJ9QM","object":"chat.completion","created":1695324963,"model":"gpt-35-turbo","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"choices":[{"index":0,"finish_reason":"function_call","message":{"role":"assistant","function_call":{"name":"search_sources","arguments":"{\\n\\"search_query\\":\\"accesstelemedicineservices\\"\\n}"}},"content_filter_results":{}}],"usage":{"completion_tokens":19,"prompt_tokens":425,"total_tokens":444}}' + default_query = "hello" + query = chat_approach.get_search_query(json.loads(payload), default_query) + + assert query == "accesstelemedicineservices" +
tests.conftest/mock_openai_chatcompletion
Modified
Azure-Samples~azure-search-openai-demo
f3cc2ad9c7de12aaa575cd5c4252efe1eaa400bd
Hotfix: Workaround for stream bug (#664)
<2>:<add> self.num = 2
<del> self.num = 1
<9>:<add> if self.num == 2:
<add> self.num -= 1
<add> # Emulate the first response being empty - bug with "2023-07-01-preview"
<add> return openai.util.convert_to_openai_object({"choices": []})
<add> elif self.num == 1:
<del> if self.num == 1:
<10>:<add> self.num -= 1
<del> self.num = 0
# module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): <0> class AsyncChatCompletionIterator: <1> def __init__(self, answer): <2> self.num = 1 <3> self.answer = answer <4> <5> def __aiter__(self): <6> return self <7> <8> async def __anext__(self): <9> if self.num == 1: <10> self.num = 0 <11> return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) <12> else: <13> raise StopAsyncIteration <14> <15> async def mock_acreate(*args, **kwargs): <16> if openai.api_type == "openai": <17> assert kwargs.get("deployment_id") is None <18> else: <19> assert kwargs.get("deployment_id") is not None <20> messages = kwargs["messages"] <21> if messages[-1]["content"] == "Generate search query for: What is the capital of France?": <22> answer = "capital of France" <23> else: <24> answer = "The capital of France is Paris." <25> if "stream" in kwargs and kwargs["stream"] is True: <26> return AsyncChatCompletionIterator(answer) <27> else: <28> return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) <29> <30> monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate) <31>
===========unchanged ref 0=========== at: _pytest.fixtures fixture(fixture_function: FixtureFunction, *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=...) -> FixtureFunction fixture(fixture_function: None=..., *, scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]"=..., params: Optional[Iterable[object]]=..., autouse: bool=..., ids: Optional[ Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] ]=..., name: Optional[str]=None) -> FixtureFunctionMarker at: _pytest.monkeypatch monkeypatch() -> Generator["MonkeyPatch", None, None] at: openai api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") at: openai.util convert_to_openai_object(resp, api_key=None, api_version=None, organization=None, engine=None, plain_old_data=False) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
f3cc2ad9c7de12aaa575cd5c4252efe1eaa400bd
Hotfix: Workaround for stream bug (#664)
<3>:<add> # "2023-07-01-preview" API version has a bug where first response has empty choices <add> if event["choices"]: <add> yield event <del> yield event
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach:
    def run_with_streaming(
        self, history: list[dict[str, str]], overrides: dict[str, Any]
    ) -> AsyncGenerator[dict, None]:
<0> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True)
<1> yield extra_info
<2> async for event in await chat_coroutine:
<3> yield event
<4>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool=False) -> tuple at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: tests.conftest @pytest.fixture def mock_openai_chatcompletion(monkeypatch): class AsyncChatCompletionIterator: def __init__(self, answer): + self.num = 2 - self.num = 1 self.answer = answer def __aiter__(self): return self async def __anext__(self): + if self.num == 2: + self.num -= 1 + # Emulate the first response being empty - bug with "2023-07-01-preview" + return openai.util.convert_to_openai_object({"choices": []}) + elif self.num == 1: - if self.num == 1: + self.num -= 1 - self.num = 0 return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]}) else: raise StopAsyncIteration async def mock_acreate(*args, **kwargs): if openai.api_type == "openai": assert kwargs.get("deployment_id") is None else: assert kwargs.get("deployment_id") is not None messages = kwargs["messages"] if messages[-1]["content"] == "Generate search query for: What is the capital of France?": answer = "capital of France" else: answer = "The capital of France is Paris." if "stream" in kwargs and kwargs["stream"] is True: return AsyncChatCompletionIterator(answer) else: return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]}) monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate)
scripts.prepdocs/compute_embedding
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<1>:<add> embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {} <del> embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {}
# module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding(text, embedding_deployment, embedding_model): <0> refresh_openai_token() <1> embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} <2> return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] <3>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() blob_name_from_file_page(filename, page=0) compute_embedding(text, embedding_deployment, embedding_model) at: scripts.prepdocs.create_sections file_id = filename_to_id(filename) ===========changed ref 0=========== # module: scripts.prepdocs + args = argparse.Namespace( + verbose=False, + openaihost="azure", + datalakestorageaccount=None, + datalakefilesystem=None, + datalakepath=None, + remove=False, + useacls=False, + skipblobs=False, + storageaccount=None, + container=None, + ) + adls_gen2_creds = None + storage_creds = None - args = argparse.Namespace(verbose=False, openaihost="azure") MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
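A minimal usage sketch of the host switch above, assuming it runs inside scripts/prepdocs.py after openai.api_key and args have been configured; the deployment and model names below are placeholders, not values taken from the repository:

    # Azure (or azure_ad) host: the Azure deployment name is passed as deployment_id.
    vector = compute_embedding("text of one section", "embedding-deployment-name", "text-embedding-ada-002")
    # openai.com host: no deployment_id is sent, only the model name is used, so the deployment argument is ignored.
    vector = compute_embedding("text of one section", None, "text-embedding-ada-002")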
scripts.prepdocs/compute_embedding_in_batch
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<1>:<add> embedding_args = {"deployment_id": args.openaideployment} if args.openaihost != "openai" else {} <del> embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {}
# module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding_in_batch(texts): <0> refresh_openai_token() <1> embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} <2> emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) <3> return [data.embedding for data in emb_response.data] <4>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() at: tenacity retry(stop: "StopBaseT"=stop_never, wait: "WaitBaseT"=wait_none(), retry: "RetryBaseT"=retry_if_exception_type(), before: t.Callable[["RetryCallState"], None]=before_nothing, after: t.Callable[["RetryCallState"], None]=after_nothing, before_sleep: t.Optional[t.Callable[["RetryCallState"], None]]=None, reraise: bool=False, retry_error_cls: t.Type[RetryError]=RetryError, retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]]=None, *, sleep: t.Callable[[t.Union[int, float]], None]=sleep) -> t.Any ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding(text, embedding_deployment, embedding_model): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs + args = argparse.Namespace( + verbose=False, + openaihost="azure", + datalakestorageaccount=None, + datalakefilesystem=None, + datalakepath=None, + remove=False, + useacls=False, + skipblobs=False, + storageaccount=None, + container=None, + ) + adls_gen2_creds = None + storage_creds = None - args = argparse.Namespace(verbose=False, openaihost="azure") MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
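As a rough sketch (not part of the script), a caller could respect the max_batch_size recorded in SUPPORTED_BATCH_AOAI_MODEL when splitting section texts into embedding batches; the loop below is illustrative only and assumes each batch already fits the model's token limit:

    batch_size = SUPPORTED_BATCH_AOAI_MODEL["text-embedding-ada-002"]["max_batch_size"]  # 16
    all_embeddings = []
    for start in range(0, len(texts), batch_size):
        # Each call returns one embedding per input text, in order.
        all_embeddings.extend(compute_embedding_in_batch(texts[start : start + batch_size]))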
scripts.prepdocs/create_search_index
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<5>:<add> fields = [
<add> SimpleField(name="id", type="Edm.String", key=True),
<add> SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"),
<add> SearchField(
<add> name="embedding",
<add> type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
<add> hidden=False,
<add> searchable=True,
<add> filterable=False,
<add> sortable=False,
<add> facetable=False,
<add> vector_search_dimensions=1536,
<add> vector_search_configuration="default",
<add> ),
<add> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
<add> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True),
<add> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True),
<add> ]
<add> if args.useacls:
<add> fields.
# module: scripts.prepdocs def create_search_index(): <0> if args.verbose: <1> print(f"Ensuring search index {args.index} exists") <2> index_client = SearchIndexClient( <3> endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds <4> ) <5> if args.index not in index_client.list_index_names(): <6> index = SearchIndex( <7> name=args.index, <8> fields=[ <9> SimpleField(name="id", type="Edm.String", key=True), <10> SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), <11> SearchField( <12> name="embedding", <13> type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <14> hidden=False, <15> searchable=True, <16> filterable=False, <17> sortable=False, <18> facetable=False, <19> vector_search_dimensions=1536, <20> vector_search_configuration="default", <21> ), <22> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <23> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), <24> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), <25> ], <26> semantic_settings=SemanticSettings( <27> configurations=[ <28> SemanticConfiguration( <29> name="default", <30> prioritized_fields=PrioritizedFields( <31> title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] <32> ), <33> ) <34> ] <35> ), <36> vector_search=VectorSearch( <37> algorithm_configurations=[ <38> VectorSearchAlgorithmConfiguration( <39> name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") <40> ) <41> ] <42> ), </s>
===========below chunk 0=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() before_retry_sleep(retry_state) at: tenacity retry(stop: "StopBaseT"=stop_never, wait: "WaitBaseT"=wait_none(), retry: "RetryBaseT"=retry_if_exception_type(), before: t.Callable[["RetryCallState"], None]=before_nothing, after: t.Callable[["RetryCallState"], None]=after_nothing, before_sleep: t.Optional[t.Callable[["RetryCallState"], None]]=None, reraise: bool=False, retry_error_cls: t.Type[RetryError]=RetryError, retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]]=None, *, sleep: t.Callable[[t.Union[int, float]], None]=sleep) -> t.Any ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding(text, embedding_deployment, embedding_model): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 2=========== # module: scripts.prepdocs + args = argparse.Namespace( + verbose=False, + openaihost="azure", + datalakestorageaccount=None, + datalakefilesystem=None, + datalakepath=None, + remove=False, + useacls=False, + skipblobs=False, + storageaccount=None, + container=None, + ) + adls_gen2_creds = None + storage_creds = None - args = argparse.Namespace(verbose=False, openaihost="azure") MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
scripts.prepdocs/index_sections
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<8>:<add> if acls:
<add> s.update(acls)
# module: scripts.prepdocs + def index_sections(filename, sections, acls=None): - def index_sections(filename, sections): <0> if args.verbose: <1> print(f"Indexing sections from '{filename}' into search index '{args.index}'") <2> search_client = SearchClient( <3> endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds <4> ) <5> i = 0 <6> batch = [] <7> for s in sections: <8> batch.append(s) <9> i += 1 <10> if i % 1000 == 0: <11> results = search_client.upload_documents(documents=batch) <12> succeeded = sum([1 for r in results if r.succeeded]) <13> if args.verbose: <14> print(f"\tIndexed {len(results)} sections, {succeeded} succeeded") <15> batch = [] <16> <17> if len(batch) > 0: <18> results = search_client.upload_documents(documents=batch) <19> succeeded = sum([1 for r in results if r.succeeded]) <20> if args.verbose: <21> print(f"\tIndexed {len(results)} sections, {succeeded} succeeded") <22>
===========unchanged ref 0=========== at: scripts.prepdocs args = argparse.Namespace( verbose=False, openaihost="azure", datalakestorageaccount=None, datalakefilesystem=None, datalakepath=None, remove=False, useacls=False, skipblobs=False, storageaccount=None, container=None, ) args = parser.parse_args() ===========changed ref 0=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding(text, embedding_deployment, embedding_model): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] ===========changed ref 1=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding_in_batch(texts): refresh_openai_token() + embedding_args = {"deployment_id": args.openaideployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {} emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts) return [data.embedding for data in emb_response.data] ===========changed ref 2=========== # module: scripts.prepdocs + args = argparse.Namespace( + verbose=False, + openaihost="azure", + datalakestorageaccount=None, + datalakefilesystem=None, + datalakepath=None, + remove=False, + useacls=False, + skipblobs=False, + storageaccount=None, + container=None, + ) + adls_gen2_creds = None + storage_creds = None - args = argparse.Namespace(verbose=False, openaihost="azure") MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 open_ai_token_cache = {} CACHE_KEY_TOKEN_CRED = "openai_token_cred" CACHE_KEY_CREATED_TIME = "created_time" CACHE_KEY_TOKEN_TYPE = "token_type" # Embedding batch support section SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}} ===========changed ref 3=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient( endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds ) + fields = [ + SimpleField(name="id", type="Edm.String", key=True), + SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField( + name="embedding", + type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, + searchable=True, + filterable=False, + sortable=False, + facetable=False, + vector_search_dimensions=1536, + vector_search_configuration="default", + ), + SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), + SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), + SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), + ] + if args.useacls: + fields.append( + SimpleField(name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) + ) + fields.append( + SimpleField(name="groups", 
type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True) + ) + if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, + fields=fields, - fields=[ - SimpleField(name="id", type="Edm.String", key=True), - SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), - SearchField( - name="</s> ===========changed ref 4=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s>="content", type="Edm.String", analyzer_name="en.microsoft"), - SearchField( - name="embedding", - type=SearchFieldDataType.Collection(SearchFieldDataType.Single), - hidden=False, - searchable=True, - filterable=False, - sortable=False, - facetable=False, - vector_search_dimensions=1536, - vector_search_configuration="default", - ), - SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), - SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), - SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True), - ], semantic_settings=SemanticSettings( configurations=[ SemanticConfiguration( name="default", prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name="content")] ), ) ] ), vector_search=VectorSearch( algorithm_configurations=[ VectorSearchAlgorithmConfiguration( name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine") ) ] ), ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists")
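The record above only shows the new `acls` parameter in the `index_sections` signature and the batched upload loop; how the ACL values are applied to each section is not shown. A minimal sketch, assuming the ACLs are merged into every section before upload (the merge step and the `oids`/`groups` shape are assumptions, not the repository's verbatim code):

```python
# Sketch only: mirrors the 1000-document batching loop shown above and adds a
# hypothetical ACL merge before upload; the exact merge behaviour is assumed.
def index_sections_with_acls(search_client, sections, acls=None):
    batch = []
    for i, section in enumerate(sections, start=1):
        if acls:
            # assumed shape, e.g. {"oids": ["OID_X"], "groups": ["GROUP_Y"]}
            section = {**section, **acls}
        batch.append(section)
        if i % 1000 == 0:
            results = search_client.upload_documents(documents=batch)
            print(f"\tIndexed {len(results)} sections")
            batch = []
    if batch:
        search_client.upload_documents(documents=batch)
```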
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.retrieve
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> top = overrides.get("top", 3) <del> top = overrides.get("top") or 3 <4>:<del> exclude_category = overrides.get("exclude_category") or None <5>:<del> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6>:<add> filter = self.build_filter(overrides, auth_claims)
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def retrieve(self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <10> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) <11> query_vector = embedding["data"][0]["embedding"] <12> else: <13> query_vector = None <14> <15> # Only keep the text query if the retrieval mode uses text, otherwise drop it <16> if not has_text: <17> query_text = "" <18> <19> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <20> if overrides.get("semantic_ranker") and has_text: <21> r = await self.search_client.search( <22> query_text, <23> filter=filter, <24> query_type=QueryType.SEMANTIC, <25> query_language="en-us", <26> query_speller="lexicon", <27> semantic_configuration_name="default", <28> top=top,</s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def retrieve(self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] content = "\n".join(results) return results, content ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " "Answer the question using only the data provided in the information sources below. " "For tabular information return it as an html table. Do not return markdown format. " "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " 'For example, if the question is "What color is the sky?" and one of the information sources says "info123: the sky is blue whenever it\'s not cloudy", then answer with "The sky is blue [info123]" ' 'It\'s important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (":"). ' 'If there are multiple sources, cite each one in their own square brackets. For example, use "[info343][ref-76]" and not "[info343,ref-76]". ' "Never quote tool names as sources." "If you cannot answer using the sources below, say that you don't know. " "\n\nYou can access to the following tools:" ) template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." 
at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.openai_host = openai_host at: approaches.approach.Approach build_filter(overrides: dict[str, Any], auth_claims: dict[str, Any]) -> str ===========unchanged ref 1=========== at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: tests.test_authenticationhelper + + ===========changed ref 1=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 2=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_unauthorized(mock_confidential_client_overage, mock_list_groups_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 3=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_unauthorized(mock_confidential_client_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 4=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_unauthorized(mock_list_groups_unauthorized): + with pytest.raises(AuthError) as exc_info: + await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert exc_info.value.error == '{"error": "unauthorized"}' + ===========changed ref 5=========== + # module: tests.test_authenticationhelper + def create_authentication_helper(): + return AuthenticationHelper( + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + token_cache_path=None, + ) + ===========changed ref 6=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_success(mock_confidential_client_success): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert auth_claims.get("oid") == "OID_X" + assert auth_claims.get("groups") == ["GROUP_Y", "GROUP_Z"] + ===========changed ref 7=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_success(mock_confidential_client_overage, 
mock_list_groups_success): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert auth_claims.get("oid") == "OID_X" + assert auth_claims.get("groups") == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] +
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<4>:<add> retrieve_results, content = await self.retrieve(q, overrides, auth_claims) <del> retrieve_results, content = await self.retrieve(q, overrides)
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> retrieve_results = None <1> <2> async def retrieve_and_store(q: str) -> Any: <3> nonlocal retrieve_results <4> retrieve_results, content = await self.retrieve(q, overrides) <5> return content <6> <7> # Use to capture thought process during iterations <8> cb_handler = HtmlCallbackHandler() <9> cb_manager = CallbackManager(handlers=[cb_handler]) <10> <11> acs_tool = Tool( <12> name="CognitiveSearch", <13> func=lambda _: "Not implemented", <14> coroutine=retrieve_and_store, <15> description=self.CognitiveSearchToolDescription, <16> callbacks=cb_manager, <17> ) <18> employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) <19> tools = [acs_tool, employee_tool] <20> <21> prompt = ZeroShotAgent.create_prompt( <22> tools=tools, <23> prefix=overrides.get("prompt_template_prefix") or self.template_prefix, <24> suffix=overrides.get("prompt_template_suffix") or self.template_suffix, <25> input_variables=["input", "agent_scratchpad"], <26> ) <27> if self.openai_type == "azure": <28> llm = AzureOpenAI( <29> deployment_name=self.openai_deployment, <30> temperature=overrides.get("temperature", 0.3), <31> openai_api_key=openai.api_key, <32> ) <33> else: <34> llm = OpenAI( <35> model_name=self.openai_model, <36> temperature=overrides.get("temperature", 0.3), <37> openai</s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 ) chain = LLMChain(llm=llm, prompt=prompt) agent_exec = AgentExecutor.from_agent_and_tools( agent=ZeroShotAgent(llm_chain=chain), tools=tools, verbose=True, callback_manager=cb_manager ) result = await agent_exec.arun(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread EmployeeInfoTool(employee_name: str, callbacks: Callbacks=None) at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def retrieve(self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False + top = overrides.get("top", 3) - top = overrides.get("top") or 3 - exclude_category = overrides.get("exclude_category") or None - filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None + filter = self.build_filter(overrides, auth_claims) # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = "" # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", </s> ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(AskApproach): + def retrieve(self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s>_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in 
doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] content = "\n".join(results) return results, content ===========changed ref 2=========== + # module: tests.test_authenticationhelper + + ===========changed ref 3=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 4=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_unauthorized(mock_confidential_client_overage, mock_list_groups_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 5=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_unauthorized(mock_confidential_client_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 6=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_unauthorized(mock_list_groups_unauthorized): + with pytest.raises(AuthError) as exc_info: + await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert exc_info.value.error == '{"error": "unauthorized"}' + ===========changed ref 7=========== + # module: tests.test_authenticationhelper + def create_authentication_helper(): + return AuthenticationHelper( + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + tenant_id="TENANT_ID", + token_cache_path=None, + ) + ===========changed ref 8=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_success(mock_confidential_client_success): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert auth_claims.get("oid") == "OID_X" + assert auth_claims.get("groups") == ["GROUP_Y", "GROUP_Z"] +
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<7>:<add> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <add> monkeypatch.delenv("AZURE_USE_AUTHENTICATION")
# module: tests.conftest @pytest_asyncio.fixture(params=envs) async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <5> for key, value in request.param.items(): <6> monkeypatch.setenv(key, value) <7> <8> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <9> mock_default_azure_credential.return_value = MockAzureCredential() <10> quart_app = app.create_app() <11> <12> async with quart_app.test_app() as test_app: <13> quart_app.config.update({"TESTING": True}) <14> <15> yield test_app.test_client() <16>
===========changed ref 0=========== # module: tests.conftest + @pytest.fixture + def mock_acs_search_filter(monkeypatch): + class AsyncSearchResultsIterator: + def __init__(self): + self.num = 1 + + def __aiter__(self): + return self + + async def __anext__(self): + raise StopAsyncIteration + + async def mock_search(self, *args, **kwargs): + self.filter = kwargs.get("filter") + return AsyncSearchResultsIterator() + + monkeypatch.setattr(SearchClient, "search", mock_search) + ===========changed ref 1=========== + # module: tests.test_adlsgen2setup + + ===========changed ref 2=========== + # module: tests.test_authenticationhelper + + ===========changed ref 3=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + """ + Sets up a Data Lake Storage Gen 2 account with sample data and access control + """ + ===========changed ref 4=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def create_service_client(self): + return DataLakeServiceClient( + account_url=f"https://{self.storage_account_name}.dfs.core.windows.net", credential=self.credentials + ) + ===========changed ref 5=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 6=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_unauthorized(mock_confidential_client_overage, mock_list_groups_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 7=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_unauthorized(mock_confidential_client_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 8=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def upload_file(self, directory_client: DataLakeDirectoryClient, file_path: str): + with open(file=file_path, mode="rb") as f: + file_client = directory_client.get_file_client(file=os.path.basename(file_path)) + await file_client.upload_data(f, overwrite=True) + ===========changed ref 9=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_unauthorized(mock_list_groups_unauthorized): + with pytest.raises(AuthError) as exc_info: + await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert exc_info.value.error == '{"error": "unauthorized"}' + ===========changed ref 10=========== + # module: tests.test_adlsgen2setup + @pytest.fixture + def mock_get_group_missing(monkeypatch): + def mock_get(*args, **kwargs): + return MockResponse( + text=json.dumps({"value": []}), + status=200, + ) + + monkeypatch.setattr(aiohttp.ClientSession, "get", mock_get) + ===========changed ref 11=========== + # module: tests.test_authenticationhelper + def create_authentication_helper(): + return AuthenticationHelper( + use_authentication=True, + server_app_id="SERVER_APP", + server_app_secret="SERVER_SECRET", + client_app_id="CLIENT_APP", + 
tenant_id="TENANT_ID", + token_cache_path=None, + ) + ===========changed ref 12=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_success(mock_confidential_client_success): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert auth_claims.get("oid") == "OID_X" + assert auth_claims.get("groups") == ["GROUP_Y", "GROUP_Z"] + ===========changed ref 13=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_success(mock_confidential_client_overage, mock_list_groups_success): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert auth_claims.get("oid") == "OID_X" + assert auth_claims.get("groups") == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 14=========== + # module: tests.test_adlsgen2setup + @pytest.fixture + def mock_open(monkeypatch): + class MockOpenedFile: + def __enter__(self, *args, **kwargs): + pass + + def __exit__(self, *args, **kwargs): + return self + + def mock_open(*args, **kwargs): + return MockOpenedFile() + + monkeypatch.setattr(builtins, "open", mock_open) + ===========changed ref 15=========== # module: scripts.prepdocs @retry( retry=retry_if_exception_type(openai.error.RateLimitError), wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep, ) def compute_embedding(text, embedding_deployment, embedding_model): refresh_openai_token() + embedding_args = {"deployment_id": embedding_deployment} if args.openaihost != "openai" else {} - embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {} return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"] ===========changed ref 16=========== # module: app.backend.approaches.approach + class Approach(ABC): + def build_filter(self, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> str: + exclude_category = overrides.get("exclude_category") or None + security_filter = AuthenticationHelper.build_security_filters(overrides, auth_claims) + filters = [] + if exclude_category: + filters.append("category ne '{}'".format(exclude_category.replace("'", "''"))) + if security_filter: + filters.append(security_filter) + return None if len(filters) == 0 else " and ".join(filters) +
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_without_streaming
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<0>:<add> extra_info, chat_coroutine = await self.run_until_final_call( <del> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False) <1>:<add> history, overrides, auth_claims, should_stream=False <add> )
<s>.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: + def run_without_streaming( + self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] - def run_without_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> dict[str, Any]: + ) -> dict[str, Any]: <0> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False) <1> chat_resp = await chat_coroutine <2> chat_content = chat_resp.choices[0].message.content <3> extra_info["answer"] = chat_content <4> return extra_info <5>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool=False) -> tuple ===========changed ref 0=========== <s>veread + class ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_until_final_call( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + should_stream: bool = False, - self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False + top = overrides.get("top", 3) - top = overrides.get("top") or 3 - exclude_category = overrides.get("exclude_category") or None - filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None + filter = self.build_filter(overrides, auth_claims) user_query_request = "Generate search query for: " + history[-1]["user"] functions = [ { "name": "search_sources", "description": "Retrieve sources from the Azure Cognitive Search index", "parameters": { "type": "object", "properties": { "search_query": { "type": "string", "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", } }, "required": ["search_query"], }, } ] # STEP 1: Generate an optimized keyword search query based on the chat history and the last question messages = self.get_messages_from_history( self.query_prompt_template, self.chatgpt_model, history, user_query_request, self.query_</s> ===========changed ref 1=========== <s> ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_until_final_call( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + should_stream: bool = False, - self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 1 <s>template, self.chatgpt_model, history, user_query_request, self.query_prompt_few_shots, self.chatgpt_token_limit - len(user_query_request), ) chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=0.0, max_tokens=32, n=1, functions=functions, function_call="auto", ) query_text = self.get_search_query(chat_completion, history[-1]["user"]) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval</s>
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run_with_streaming
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<0>:<add> extra_info, chat_coroutine = await self.run_until_final_call( <del> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True) <1>:<add> history, overrides, auth_claims, should_stream=True <add> )
# module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_with_streaming( + self, history: list[dict[str, str]], overrides: dict[str, Any], auth_claims: dict[str, Any] - self, history: list[dict[str, str]], overrides: dict[str, Any] ) -> AsyncGenerator[dict, None]: <0> extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True) <1> yield extra_info <2> async for event in await chat_coroutine: <3> # "2023-07-01-preview" API version has a bug where first response has empty choices <4> if event["choices"]: <5> yield event <6>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach run_until_final_call(history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool=False) -> tuple at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== <s>veread + class ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_until_final_call( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + should_stream: bool = False, - self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False + top = overrides.get("top", 3) - top = overrides.get("top") or 3 - exclude_category = overrides.get("exclude_category") or None - filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None + filter = self.build_filter(overrides, auth_claims) user_query_request = "Generate search query for: " + history[-1]["user"] functions = [ { "name": "search_sources", "description": "Retrieve sources from the Azure Cognitive Search index", "parameters": { "type": "object", "properties": { "search_query": { "type": "string", "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", } }, "required": ["search_query"], }, } ] # STEP 1: Generate an optimized keyword search query based on the chat history and the last question messages = self.get_messages_from_history( self.query_prompt_template, self.chatgpt_model, history, user_query_request, self.query_</s> ===========changed ref 1=========== <s> ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_until_final_call( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + should_stream: bool = False, - self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 1 <s>template, self.chatgpt_model, history, user_query_request, self.query_prompt_few_shots, self.chatgpt_token_limit - len(user_query_request), ) chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=0.0, max_tokens=32, n=1, functions=functions, function_call="auto", ) query_text = self.get_search_query(chat_completion, history[-1]["user"]) # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval</s> ===========changed ref 2=========== <s> ChatReadRetrieveReadApproach(Approach): - class ChatReadRetrieveReadApproach: def run_until_final_call( + self, + history: list[dict[str, str]], + overrides: dict[str, Any], + auth_claims: dict[str, Any], + should_stream: bool = 
False, - self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False ) -> tuple: # offset: 2 <s> text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: </s>
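`run_with_streaming` above yields the `extra_info` dict first and then only the chat-completion events whose `choices` list is non-empty. A hypothetical consumer loop is sketched below; the route shown later in this document only assigns the generator, so this usage pattern is an assumption:

```python
# Hypothetical consumer of the async generator returned by run_with_streaming.
# First yielded item: the extra_info dict (likely data_points/thoughts);
# subsequent items: streamed chat completion events with non-empty choices.
async def consume_stream(impl, history, overrides, auth_claims):
    first = True
    async for event in impl.run_with_streaming(history, overrides, auth_claims):
        if first:
            extra_info = event  # supporting info for the UI
            first = False
        else:
            print(event["choices"][0])  # one streamed completion chunk
```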
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> top = overrides.get("top", 3) <del> top = overrides.get("top") or 3 <4>:<del> exclude_category = overrides.get("exclude_category") or None <5>:<del> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6>:<add> filter = self.build_filter(overrides, auth_claims)
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <10> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q) <11> query_vector = embedding["data"][0]["embedding"] <12> else: <13> query_vector = None <14> <15> # Only keep the text query if the retrieval mode uses text, otherwise drop it <16> query_text = q if has_text else "" <17> <18> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <19> if overrides.get("semantic_ranker") and has_text: <20> r = await self.search_client.search( <21> query_text, <22> filter=filter, <23> query_type=QueryType.SEMANTIC, <24> query_language="en-us", <25> query_speller="lexicon", <26> semantic_configuration_name="default", <27> top=top,</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: results = [ doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r] content = "\n".join(results) message_builder = MessageBuilder( overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model ) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message("user", user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message("assistant", self.answer) message_builder.append_message("user", self.question) messages = message_builder.messages chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 2 <s> chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {} chat_completion = await openai.ChatCompletion.acreate( **chatgpt_args, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1, ) return { "data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + "\n\n".join([str(message) for message in messages]), } ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = ( "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " + "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + "If you cannot answer using the sources below, say you don't know. Use below example to answer" ) question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. 
info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_host = openai_host self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.embedding_model = embedding_model self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": system_content}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" ===========unchanged ref 2=========== acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: app.backend.core.authentication + + ===========changed ref 1=========== + # module: tests.test_adlsgen2setup + +
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <add> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <11>:<add> r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) <del> r = await impl.run(request_json["question"], request_json.get("overrides") or {})
# module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await impl.run(request_json["question"], request_json.get("overrides") or {}) <12> return jsonify(r) <13> except Exception as e: <14> logging.exception("Exception in /ask") <15> return jsonify({"error": str(e)}), 500 <16>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACHES = "ask_approaches" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app + # Empty page is recommended for login redirect to work. + # See https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/initialization.md#redirecturi-considerations for more information + @bp.route("/redirect") + async def redirect(): + return "" + ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" + CONFIG_AUTH_CLIENT = "auth_client" + CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 2=========== + # module: tests.test_manageacl + + ===========changed ref 3=========== + # module: app.backend.core.authentication + + ===========changed ref 4=========== + # module: tests.test_adlsgen2setup + + ===========changed ref 5=========== + # module: tests.test_authenticationhelper + + ===========changed ref 6=========== # module: tests.conftest + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 7=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 8=========== # module: tests.conftest + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 9=========== # module: tests.conftest + class MockResponse: + def text(self): + return self._text + ===========changed ref 10=========== # module: tests.conftest + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 11=========== # module: tests.conftest + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 12=========== + # module: 
tests.test_manageacl + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + self.num = len(results) + ===========changed ref 13=========== + # module: app.backend.core.authentication + class AuthenticationHelper: + scope: str = "https://graph.microsoft.com/.default" + ===========changed ref 14=========== + # module: app.backend.core.authentication + # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API + class AuthError(Exception): + def __init__(self, error, status_code): + self.error = error + self.status_code = status_code + ===========changed ref 15=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + """ + Sets up a Data Lake Storage Gen 2 account with sample data and access control + """ + ===========changed ref 16=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __anext__(self): + self.num -= 1 + if self.num >= 0: + return self.results[self.num] + + raise StopAsyncIteration + ===========changed ref 17=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def create_service_client(self): + return DataLakeServiceClient( + account_url=f"https://{self.storage_account_name}.dfs.core.windows.net", credential=self.credentials + ) + ===========changed ref 18=========== + # module: scripts.manageacl + class ManageAcl: + def get_documents(self, search_client: SearchClient): + filter = f"sourcefile eq '{self.document}'" + result = await search_client.search("", filter=filter, select=["id", self.acl_type]) + return result + ===========changed ref 19=========== + # module: scripts.manageacl + class ManageAcl: + """ + Manually enable document level access control on a search index and manually set access control values using the [manageacl.ps1](./scripts/manageacl.ps1) script. + """ + ===========changed ref 20=========== + # module: scripts.manageacl + class ManageAcl: + def view_acl(self, search_client: SearchClient): + async for document in await self.get_documents(search_client): + # Assumes the acls are consistent across all sections of the document + print(json.dumps(document[self.acl_type])) + return + ===========changed ref 21=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 22=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_unauthorized(mock_confidential_client_overage, mock_list_groups_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 + ===========changed ref 23=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_unauthorized(mock_confidential_client_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 +
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <add> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <11>:<add> r = await impl.run_without_streaming( <add> request_json["history"], request_json.get("overrides", {}), auth_claims <add> ) <del> r = await impl.run_without_streaming(request_json["history"], request_json.get("overrides", {}))
# module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> # Workaround for: https://github.com/openai/openai-python/issues/371 <9> async with aiohttp.ClientSession() as s: <10> openai.aiosession.set(s) <11> r = await impl.run_without_streaming(request_json["history"], request_json.get("overrides", {})) <12> return jsonify(r) <13> except Exception as e: <14> logging.exception("Exception in /chat") <15> return jsonify({"error": str(e)}), 500 <16>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_CHAT_APPROACHES = "chat_approaches" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app + # Empty page is recommended for login redirect to work. + # See https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/initialization.md#redirecturi-considerations for more information + @bp.route("/redirect") + async def redirect(): + return "" + ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" + CONFIG_AUTH_CLIENT = "auth_client" + CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 2=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() + auth_helper = current_app.config[CONFIG_AUTH_CLIENT] + auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) approach = request_json["approach"] try: impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) - r = await impl.run(request_json["question"], request_json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 3=========== + # module: tests.test_manageacl + + ===========changed ref 4=========== + # module: app.backend.core.authentication + + ===========changed ref 5=========== + # module: tests.test_adlsgen2setup + + 
===========changed ref 6=========== + # module: tests.test_authenticationhelper + + ===========changed ref 7=========== # module: tests.conftest + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 8=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 9=========== # module: tests.conftest + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 10=========== # module: tests.conftest + class MockResponse: + def text(self): + return self._text + ===========changed ref 11=========== # module: tests.conftest + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 12=========== # module: tests.conftest + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 13=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + self.num = len(results) + ===========changed ref 14=========== + # module: app.backend.core.authentication + class AuthenticationHelper: + scope: str = "https://graph.microsoft.com/.default" + ===========changed ref 15=========== + # module: app.backend.core.authentication + # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API + class AuthError(Exception): + def __init__(self, error, status_code): + self.error = error + self.status_code = status_code + ===========changed ref 16=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + """ + Sets up a Data Lake Storage Gen 2 account with sample data and access control + """ + ===========changed ref 17=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __anext__(self): + self.num -= 1 + if self.num >= 0: + return self.results[self.num] + + raise StopAsyncIteration + ===========changed ref 18=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def create_service_client(self): + return DataLakeServiceClient( + account_url=f"https://{self.storage_account_name}.dfs.core.windows.net", credential=self.credentials + ) + ===========changed ref 19=========== + # module: scripts.manageacl + class ManageAcl: + def get_documents(self, search_client: SearchClient): + filter = f"sourcefile eq '{self.document}'" + result = await search_client.search("", filter=filter, select=["id", self.acl_type]) + return result + ===========changed ref 20=========== + # module: scripts.manageacl + class ManageAcl: + """ + Manually enable document level access control on a search index and manually set access control values using the [manageacl.ps1](./scripts/manageacl.ps1) script. + """ + ===========changed ref 21=========== + # module: scripts.manageacl + class ManageAcl: + def view_acl(self, search_client: SearchClient): + async for document in await self.get_documents(search_client): + # Assumes the acls are consistent across all sections of the document + print(json.dumps(document[self.acl_type])) + return +
app.backend.app/chat_stream
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> auth_helper = current_app.config[CONFIG_AUTH_CLIENT] <add> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) <8>:<add> response_generator = impl.run_with_streaming( <add> request_json["history"], request_json.get("overrides", {}), auth_claims <add> ) <del> response_generator = impl.run_with_streaming(request_json["history"], request_json.get("overrides", {}))
# module: app.backend.app @bp.route("/chat_stream", methods=["POST"]) async def chat_stream(): <0> if not request.is_json: <1> return jsonify({"error": "request must be json"}), 415 <2> request_json = await request.get_json() <3> approach = request_json["approach"] <4> try: <5> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> response_generator = impl.run_with_streaming(request_json["history"], request_json.get("overrides", {})) <9> response = await make_response(format_as_ndjson(response_generator)) <10> response.timeout = None # type: ignore <11> return response <12> except Exception as e: <13> logging.exception("Exception in /chat") <14> return jsonify({"error": str(e)}), 500 <15>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACHES = "chat_approaches" bp = Blueprint("routes", __name__, static_folder="static") format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None] at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app + # Empty page is recommended for login redirect to work. + # See https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/initialization.md#redirecturi-considerations for more information + @bp.route("/redirect") + async def redirect(): + return "" + ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" + CONFIG_AUTH_CLIENT = "auth_client" + CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 2=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() + auth_helper = current_app.config[CONFIG_AUTH_CLIENT] + auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) approach = request_json["approach"] try: impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + r = await impl.run_without_streaming( + request_json["history"], request_json.get("overrides", {}), auth_claims + ) - r = await impl.run_without_streaming(request_json["history"], request_json.get("overrides", {})) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500 ===========changed ref 3=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() + auth_helper = current_app.config[CONFIG_AUTH_CLIENT] + auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) approach = request_json["approach"] try: impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) + r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) - r = await impl.run(request_json["question"], request_json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 4=========== + # module: tests.test_manageacl + + ===========changed ref 5=========== + # module: app.backend.core.authentication + + ===========changed ref 6=========== + # module: tests.test_adlsgen2setup + + ===========changed ref 7=========== + # module: tests.test_authenticationhelper + + ===========changed ref 8=========== # module: tests.conftest + class MockResponse: + def 
__aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 9=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 10=========== # module: tests.conftest + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 11=========== # module: tests.conftest + class MockResponse: + def text(self): + return self._text + ===========changed ref 12=========== # module: tests.conftest + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 13=========== # module: tests.conftest + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 14=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + self.num = len(results) + ===========changed ref 15=========== + # module: app.backend.core.authentication + class AuthenticationHelper: + scope: str = "https://graph.microsoft.com/.default" + ===========changed ref 16=========== + # module: app.backend.core.authentication + # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API + class AuthError(Exception): + def __init__(self, error, status_code): + self.error = error + self.status_code = status_code + ===========changed ref 17=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + """ + Sets up a Data Lake Storage Gen 2 account with sample data and access control + """ + ===========changed ref 18=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __anext__(self): + self.num -= 1 + if self.num >= 0: + return self.results[self.num] + + raise StopAsyncIteration + ===========changed ref 19=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def create_service_client(self): + return DataLakeServiceClient( + account_url=f"https://{self.storage_account_name}.dfs.core.windows.net", credential=self.credentials + ) + ===========changed ref 20=========== + # module: scripts.manageacl + class ManageAcl: + def get_documents(self, search_client: SearchClient): + filter = f"sourcefile eq '{self.document}'" + result = await search_client.search("", filter=filter, select=["id", self.acl_type]) + return result + ===========changed ref 21=========== + # module: scripts.manageacl + class ManageAcl: + """ + Manually enable document level access control on a search index and manually set access control values using the [manageacl.ps1](./scripts/manageacl.ps1) script. + """ + ===========changed ref 22=========== + # module: scripts.manageacl + class ManageAcl: + def view_acl(self, search_client: SearchClient): + async for document in await self.get_documents(search_client): + # Assumes the acls are consistent across all sections of the document + print(json.dumps(document[self.acl_type])) + return + ===========changed ref 23=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] +
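Note on the streaming route above: /chat_stream hands the approach's async generator to format_as_ndjson, whose signature is listed in the unchanged refs (AsyncGenerator[dict, None] in, AsyncGenerator[str, None] out). A minimal sketch of such a formatter, assuming each yielded dict is one self-contained event, is shown below; the real helper in app.py may differ in detail.

import json
from typing import AsyncGenerator

async def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # Serialize every event from run_with_streaming as one JSON document per line
    # (newline-delimited JSON), so the frontend can parse the stream incrementally.
    async for event in r:
        yield json.dumps(event, ensure_ascii=False) + "\n"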
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<16>:<add> AZURE_USE_AUTHENTICATION = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true" <add> AZURE_SERVER_APP_ID = os.getenv("AZURE_SERVER_APP_ID") <add> AZURE_SERVER_APP_SECRET = os.getenv("AZURE_SERVER_APP_SECRET") <add> AZURE_CLIENT_APP_ID = os.getenv("AZURE_CLIENT_APP_ID") <add> AZURE_TENANT_ID = os.getenv("AZURE_TENANT_ID") <add> TOKEN_CACHE_PATH = os.getenv("TOKEN_CACHE_PATH")
# module: app.backend.app @bp.before_app_serving async def setup_clients(): <0> # Replace these with your own values, either in environment variables or directly here <1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] <2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] <3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] <4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] <5> # Shared by all OpenAI deployments <6> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") <7> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] <8> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") <9> # Used with Azure OpenAI deployments <10> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE") <11> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT") <12> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT") <13> # Used only with non-Azure OpenAI deployments <14> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") <15> OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION") <16> <17> KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") <18> KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") <19> <20> # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, <21> # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) # Set up clients for Cognitive Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api_type = "azure_ad" openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-07-01-preview" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token else: openai.api_type = "openai" openai.api_key = OPENAI_API_KEY openai.organization = OPENAI_ORGANIZATION current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s>] = blob_container_client # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rrr": ReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rda": ReadDecomposeAsk( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATG</s> ===========below chunk 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 3 <s>MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" bp = Blueprint("routes", __name__, static_folder="static") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, 
sourcepage_field: str, content_field: str) at: approaches.readdecomposeask ReadDecomposeAsk(search_client: SearchClient, openai_host: str, openai_deployment: str, openai_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.readretrieveread ReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, openai_deployment: str, openai_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: openai api_key = os.environ.get("OPENAI_API_KEY") organization = os.environ.get("OPENAI_ORGANIZATION") api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") api_type = os.environ.get("OPENAI_API_TYPE", "open_ai") ===========unchanged ref 1=========== api_version = os.environ.get( "OPENAI_API_VERSION", ("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None), ) at: os environ = _createenviron() getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.app + # Empty page is recommended for login redirect to work. + # See https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/initialization.md#redirecturi-considerations for more information + @bp.route("/redirect") + async def redirect(): + return "" + ===========changed ref 1=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACHES = "ask_approaches" CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" + CONFIG_AUTH_CLIENT = "auth_client" + CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static")
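One detail worth noting in the new settings block of setup_clients: AZURE_USE_AUTHENTICATION is compared against the literal string "true" after lower-casing, so a naive truthiness check would not behave the same way (any non-empty value, even "False", is truthy). A small self-contained illustration:

import os

os.environ["AZURE_USE_AUTHENTICATION"] = "False"

# Naive check: wrong, because the non-empty string "False" is truthy.
print(bool(os.getenv("AZURE_USE_AUTHENTICATION")))                  # True

# Pattern used in the diff: explicit, case-insensitive comparison against "true".
print(os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true")  # False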
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.search
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<3>:<add> top = overrides.get("top", 3) <del> top = overrides.get("top") or 3 <4>:<del> exclude_category = overrides.get("exclude_category") or None <5>:<del> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6>:<add> filter = self.build_filter(overrides, auth_claims)
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): + def search( + self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any] + ) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} <10> embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) <11> query_vector = embedding["data"][0]["embedding"] <12> else: <13> query_vector = None <14> <15> # Only keep the text query if the retrieval mode uses text, otherwise drop it <16> if not has_text: <17> query_text = "" <18> <19> if overrides.get("semantic_ranker") and has_text: <20> r = await self.search_client.search( <21> query_text, <22> filter=filter, <23> query_type=QueryType.SEMANTIC, <24> query_language="en-us", <25> query_speller="lexicon", <26> semantic_configuration_name="default", <27> top=top, <28> query_</s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): + def search( + self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any] + ) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: # offset: 1 vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) else: r = await self.search_client.search( query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None, ) if use_semantic_captions: self.results = [ doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc["@search.captions"]])) async for doc in r ] else: results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r] return results, "\n".join(results) ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment self.embedding_model = embedding_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.openai_host = openai_host at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: tests.test_manageacl + + ===========changed ref 1=========== + # module: app.backend.core.authentication + + ===========changed ref 2=========== + # module: tests.test_adlsgen2setup + + ===========changed ref 3=========== + # module: tests.test_authenticationhelper + + ===========changed ref 4=========== # module: tests.conftest + class MockResponse: + def __aexit__(self, exc_type, exc, tb): + pass + ===========changed ref 5=========== # module: app.backend.app + # Empty page is recommended for login redirect to work. 
+ # See https://github.com/AzureAD/microsoft-authentication-library-for-js/blob/dev/lib/msal-browser/docs/initialization.md#redirecturi-considerations for more information + @bp.route("/redirect") + async def redirect(): + return "" + ===========changed ref 6=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __aiter__(self): + return self + ===========changed ref 7=========== # module: tests.conftest + class MockResponse: + def __aenter__(self): + return self + ===========changed ref 8=========== # module: tests.conftest + class MockResponse: + def text(self): + return self._text + ===========changed ref 9=========== # module: tests.conftest + class MockResponse: + def json(self): + return json.loads(self.text) + ===========changed ref 10=========== # module: tests.conftest + class MockResponse: + def __init__(self, text, status): + self.text = text + self.status = status + ===========changed ref 11=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __init__(self, results): + self.results = results + self.num = len(results) + ===========changed ref 12=========== + # module: app.backend.core.authentication + class AuthenticationHelper: + scope: str = "https://graph.microsoft.com/.default" + ===========changed ref 13=========== + # module: app.backend.core.authentication + # AuthError is raised when the authentication token sent by the client UI cannot be parsed or there is an authentication error accessing the graph API + class AuthError(Exception): + def __init__(self, error, status_code): + self.error = error + self.status_code = status_code + ===========changed ref 14=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + """ + Sets up a Data Lake Storage Gen 2 account with sample data and access control + """ + ===========changed ref 15=========== + # module: tests.test_manageacl + class AsyncSearchResultsIterator: + def __anext__(self): + self.num -= 1 + if self.num >= 0: + return self.results[self.num] + + raise StopAsyncIteration + ===========changed ref 16=========== # module: app.backend.app + # Send MSAL.js settings to the client UI + @bp.route("/auth_setup", methods=["GET"]) + def auth_setup(): + auth_helper = current_app.config[CONFIG_AUTH_CLIENT] + return jsonify(auth_helper.get_auth_setup_for_client()) + ===========changed ref 17=========== + # module: scripts.adlsgen2setup + class AdlsGen2Setup: + def create_service_client(self): + return DataLakeServiceClient( + account_url=f"https://{self.storage_account_name}.dfs.core.windows.net", credential=self.credentials + ) + ===========changed ref 18=========== + # module: scripts.manageacl + class ManageAcl: + def get_documents(self, search_client: SearchClient): + filter = f"sourcefile eq '{self.document}'" + result = await search_client.search("", filter=filter, select=["id", self.acl_type]) + return result + ===========changed ref 19=========== + # module: scripts.manageacl + class ManageAcl: + """ + Manually enable document level access control on a search index and manually set access control values using the [manageacl.ps1](./scripts/manageacl.ps1) script. 
+ """ + ===========changed ref 20=========== + # module: scripts.manageacl + class ManageAcl: + def view_acl(self, search_client: SearchClient): + async for document in await self.get_documents(search_client): + # Assumes the acls are consistent across all sections of the document + print(json.dumps(document[self.acl_type])) + return + ===========changed ref 21=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_list_groups_success(mock_list_groups_success): + groups = await AuthenticationHelper.list_groups(graph_resource_access_token={"access_token": "MockToken"}) + assert groups == ["OVERAGE_GROUP_Y", "OVERAGE_GROUP_Z"] + ===========changed ref 22=========== + # module: tests.test_authenticationhelper + @pytest.mark.asyncio + async def test_get_auth_claims_overage_unauthorized(mock_confidential_client_overage, mock_list_groups_unauthorized): + helper = create_authentication_helper() + auth_claims = await helper.get_auth_claims_if_enabled(headers={"Authorization": "Bearer Token"}) + assert len(auth_claims.keys()) == 0 +
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.run
Modified
Azure-Samples~azure-search-openai-demo
c8b8486d68527026b0660a3a75cc4e5e376793c1
Add support for an optional login and document level access control system. (#624)
<4>:<add> search_results, content = await self.search(q, overrides, auth_claims) <del> search_results, content = await self.search(q, overrides)
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: <0> search_results = None <1> <2> async def search_and_store(q: str) -> Any: <3> nonlocal search_results <4> search_results, content = await self.search(q, overrides) <5> return content <6> <7> # Use to capture thought process during iterations <8> cb_handler = HtmlCallbackHandler() <9> cb_manager = CallbackManager(handlers=[cb_handler]) <10> <11> if self.openai_host == "azure": <12> llm = AzureOpenAI( <13> deployment_name=self.openai_deployment, <14> temperature=overrides.get("temperature", 0.3), <15> openai_api_key=openai.api_key, <16> ) <17> else: <18> llm = OpenAI( <19> model_name=self.openai_model, <20> temperature=overrides.get("temperature", 0.3), <21> openai_api_key=openai.api_key, <22> ) <23> tools = [ <24> Tool( <25> name="Search", <26> func=lambda _: "Not implemented", <27> coroutine=search_and_store, <28> description="useful for when you need to ask with search", <29> callbacks=cb_manager, <30> ), <31> Tool( <32> name="Lookup", <33> func=lambda _: "Not implemented", <34> coroutine=self.lookup, <35> description="useful for when you need to ask with lookup", <36> callbacks=cb_manager, <37> ), <38> ] <39> <40> prompt_prefix = overrides.get("prompt_template") <41> prompt = PromptTemplate.from_examples( <42> EXAMPLES, <43> SUFFIX, <44> </s>
===========below chunk 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): + def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]: # offset: 1 prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX, ) class ReAct(ReActDocstoreAgent): @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: return prompt agent = ReAct.from_llm_and_tools(llm, tools) chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager) result = await chain.arun(q) # Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links, match them with a regex to avoid # generalizing too much and disrupt HTML snippets if present result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result) return {"data_points": search_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask EXAMPLES = [ """Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? Thought: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area. Action: Search[Colorado orogeny] Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas. Thought: It does not mention the eastern sector. So I need to look up eastern sector. Action: Lookup[eastern sector] Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny. Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range. Action: Search[High Plains] Observation: <some_file.pdf> High Plains refers to one of two distinct land regions Thought: I need to instead search High Plains (United States). Action: Search[High Plains (United States)] Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m). Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. Action: Finish[1,800 to 7,000 ft <filea.pdf>]""", """Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who</s> ===========unchanged ref 1=========== SUFFIX = """\nQuestion: {input} {agent_scratchpad}""" PREFIX = ( "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " "Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers." "All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. 
" ) at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str] search(query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str] lookup(q: str) -> Optional[str] at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.openai_deployment = openai_deployment self.openai_model = openai_model self.openai_host = openai_host at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any] at: langchainadapters.HtmlCallbackHandler html: str = "" get_and_reset_log() -> str at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========unchanged ref 2=========== at: re sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(AskApproach): + def search( + self, query_text: str, overrides: dict[str, Any], auth_claims: dict[str, Any] + ) -> tuple[list[str], str]: - def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False + top = overrides.get("top", 3) - top = overrides.get("top") or 3 - exclude_category = overrides.get("exclude_category") or None - filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None + filter = self.build_filter(overrides, auth_claims) # If retrieval mode includes vectors, compute an embedding for the query if has_vector: embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {} embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text) query_vector = embedding["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = "" if overrides.get("semantic_ranker") and has_text: r = await self.search_client.search( query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top,</s>
tests.test_app/test_missing_env_vars
Modified
Azure-Samples~azure-search-openai-demo
2ff334456c3d0bff6bf000c423e90a6180619484
Add E2E tests using Playwright, increase accessibility of buttons (#677)
<0>:<add> with mock.patch.dict(os.environ, clear=True): <add> quart_app = app.create_app() <del> quart_app = app.create_app() <2>:<add> with pytest.raises(quart.testing.app.LifespanError, match="Error during startup 'AZURE_STORAGE_ACCOUNT'"): <del> with pytest.raises(quart.testing.app.LifespanError) as exc_info: <3>:<add> async with quart_app.test_app() as test_app: <del> async with quart_app.test_app() as test_app: <4>:<add> test_app.test_client() <del> test_app.test_client() <5>:<del> assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''"
# module: tests.test_app @pytest.mark.asyncio async def test_missing_env_vars(): <0> quart_app = app.create_app() <1> <2> with pytest.raises(quart.testing.app.LifespanError) as exc_info: <3> async with quart_app.test_app() as test_app: <4> test_app.test_client() <5> assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''" <6>
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
2ff334456c3d0bff6bf000c423e90a6180619484
Add E2E tests using Playwright, increase accessibility of buttons (#677)
<0>:<del> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1>:<del> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2>:<del> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3>:<del> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4>:<del> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <5>:<del> for key, value in request.param.items(): <6>:<del> monkeypatch.setenv(key, value) <7>:<del> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <8>:<del> monkeypatch.delenv("AZURE_USE_AUTHENTICATION") <9>:<add> quart_app = app.create_app() <10>:<del> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <11>:<del> mock_default_azure_credential.return_value = MockAzureCredential() <12>:<del> quart_app = app.create_app() <13>:<add> async with quart_app.test_app() as test_app: <add> quart_app.config.update({"TESTING": True}) <14>:<del> async with quart_app.test_app() as test_app: <15>:<del> quart_app.config.update({"TESTING": True}) <16>:<add> yield test_app.test_client() <17>:<del> yield test_app.test_client() <18>:<del>
# module: tests.conftest + @pytest_asyncio.fixture() - @pytest_asyncio.fixture(params=envs) + async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): - async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): <0> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <1> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <2> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <3> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <4> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <5> for key, value in request.param.items(): <6> monkeypatch.setenv(key, value) <7> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <8> monkeypatch.delenv("AZURE_USE_AUTHENTICATION") <9> <10> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <11> mock_default_azure_credential.return_value = MockAzureCredential() <12> quart_app = app.create_app() <13> <14> async with quart_app.test_app() as test_app: <15> quart_app.config.update({"TESTING": True}) <16> <17> yield test_app.test_client() <18>
===========changed ref 0=========== # module: tests.conftest + @pytest.fixture(params=envs, ids=["client0", "client1"]) + def mock_env(monkeypatch, request): + with mock.patch.dict(os.environ, clear=True): + monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") + monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") + monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") + monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") + monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") + for key, value in request.param.items(): + monkeypatch.setenv(key, value) + if os.getenv("AZURE_USE_AUTHENTICATION") is not None: + monkeypatch.delenv("AZURE_USE_AUTHENTICATION") + + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + yield + ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_missing_env_vars(): + with mock.patch.dict(os.environ, clear=True): + quart_app = app.create_app() - quart_app = app.create_app() + with pytest.raises(quart.testing.app.LifespanError, match="Error during startup 'AZURE_STORAGE_ACCOUNT'"): - with pytest.raises(quart.testing.app.LifespanError) as exc_info: + async with quart_app.test_app() as test_app: - async with quart_app.test_app() as test_app: + test_app.test_client() - test_app.test_client() - assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''"
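With the fixture split, mock_env owns the environment variables and the DefaultAzureCredential patch, while client only builds the Quart test app, so a test simply requests client. A minimal illustrative test (the route and assertion are generic, not taken from the suite):

import pytest

@pytest.mark.asyncio
async def test_index(client):
    # `client` depends on mock_env, so no per-test environment setup is needed.
    response = await client.get("/")
    assert response.status_code == 200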
app.backend.app/assets
Modified
Azure-Samples~azure-search-openai-demo
2ff334456c3d0bff6bf000c423e90a6180619484
Add E2E tests using Playwright, increase accessibility of buttons (#677)
<0>:<add> return await send_from_directory(Path(__file__).resolve().parent / "static" / "assets", path) <del> return await send_from_directory("static/assets", path)
# module: app.backend.app @bp.route("/assets/<path:path>") async def assets(path): <0> return await send_from_directory("static/assets", path) <1>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_missing_env_vars(): + with mock.patch.dict(os.environ, clear=True): + quart_app = app.create_app() - quart_app = app.create_app() + with pytest.raises(quart.testing.app.LifespanError, match="Error during startup 'AZURE_STORAGE_ACCOUNT'"): - with pytest.raises(quart.testing.app.LifespanError) as exc_info: + async with quart_app.test_app() as test_app: - async with quart_app.test_app() as test_app: + test_app.test_client() - test_app.test_client() - assert str(exc_info.value) == "Lifespan failure in startup. ''AZURE_OPENAI_EMB_DEPLOYMENT''" ===========changed ref 1=========== # module: tests.conftest + @pytest.fixture(params=envs, ids=["client0", "client1"]) + def mock_env(monkeypatch, request): + with mock.patch.dict(os.environ, clear=True): + monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") + monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") + monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") + monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") + monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") + for key, value in request.param.items(): + monkeypatch.setenv(key, value) + if os.getenv("AZURE_USE_AUTHENTICATION") is not None: + monkeypatch.delenv("AZURE_USE_AUTHENTICATION") + + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + yield + ===========changed ref 2=========== # module: tests.conftest + @pytest_asyncio.fixture() - @pytest_asyncio.fixture(params=envs) + async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): - async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request): - monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") - monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") - monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") - monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") - monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") - for key, value in request.param.items(): - monkeypatch.setenv(key, value) - if os.getenv("AZURE_USE_AUTHENTICATION") is not None: - monkeypatch.delenv("AZURE_USE_AUTHENTICATION") + quart_app = app.create_app() - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - quart_app = app.create_app() + async with quart_app.test_app() as test_app: + quart_app.config.update({"TESTING": True}) - async with quart_app.test_app() as test_app: - quart_app.config.update({"TESTING": True}) + yield test_app.test_client() - yield test_app.test_client() -
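The updated route resolves the assets directory relative to app.py itself rather than the process working directory, which keeps static serving intact no matter where the server is launched from. For example (the printed path is purely illustrative):

from pathlib import Path

# .../backend/static/assets next to this file, independent of os.getcwd().
assets_dir = Path(__file__).resolve().parent / "static" / "assets"
print(assets_dir)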
tests.conftest/mock_env
Modified
Azure-Samples~azure-search-openai-demo
22a45dc1eab44c289e417de53c8b1d60d000e228
Enable CORS support (#717)
<6>:<add> monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com")
# module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): <0> with mock.patch.dict(os.environ, clear=True): <1> monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") <2> monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") <3> monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") <4> monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") <5> monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") <6> for key, value in request.param.items(): <7> monkeypatch.setenv(key, value) <8> if os.getenv("AZURE_USE_AUTHENTICATION") is not None: <9> monkeypatch.delenv("AZURE_USE_AUTHENTICATION") <10> <11> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <12> mock_default_azure_credential.return_value = MockAzureCredential() <13> yield <14>
===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_cors_notallowed(client) -> None: + response = await client.get("/", headers={"Origin": "https://quart.com"}) + assert "Access-Control-Allow-Origin" not in response.headers + ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_cors_allowed(client) -> None: + response = await client.get("/", headers={"Origin": "https://frontend.com"}) + assert response.access_control_allow_origin == "https://frontend.com" + assert "Access-Control-Allow-Origin" in response.headers +
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
22a45dc1eab44c289e417de53c8b1d60d000e228
Enable CORS support (#717)
<6>:<add> <8>:<add> <add> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <add> app.logger.info("CORS enabled for %s", allowed_origin) <add> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"])
# module: app.backend.app def create_app(): <0> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <1> configure_azure_monitor() <2> AioHttpClientInstrumentor().instrument() <3> app = Quart(__name__) <4> app.register_blueprint(bp) <5> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) <6> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels <7> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", "ERROR")) <8> return app <9>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str] ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_cors_notallowed(client) -> None: + response = await client.get("/", headers={"Origin": "https://quart.com"}) + assert "Access-Control-Allow-Origin" not in response.headers + ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_cors_allowed(client) -> None: + response = await client.get("/", headers={"Origin": "https://frontend.com"}) + assert response.access_control_allow_origin == "https://frontend.com" + assert "Access-Control-Allow-Origin" in response.headers + ===========changed ref 2=========== # module: tests.conftest @pytest.fixture(params=envs, ids=["client0", "client1"]) def mock_env(monkeypatch, request): with mock.patch.dict(os.environ, clear=True): monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account") monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service") monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo") + monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com") for key, value in request.param.items(): monkeypatch.setenv(key, value) if os.getenv("AZURE_USE_AUTHENTICATION") is not None: monkeypatch.delenv("AZURE_USE_AUTHENTICATION") with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: mock_default_azure_credential.return_value = MockAzureCredential() yield
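The CORS support is gated behind the ALLOWED_ORIGIN setting, so only that origin gets Access-Control-Allow-Origin headers back, which is exactly what the two new tests assert. A stripped-down sketch of the same quart-cors pattern, assuming a standalone app rather than the project's create_app:

import os
from quart import Quart
from quart_cors import cors

app = Quart(__name__)

if allowed_origin := os.getenv("ALLOWED_ORIGIN"):
    # Only GET/POST requests from the configured origin receive CORS headers.
    cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"])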
app.backend.app/create_app
Modified
Azure-Samples~azure-search-openai-demo
fd739e2772ec04946784a8c9537163317959a9c8
Default log level for dev vs prod (#718)
<8>:<add> default_level = "INFO" # In development, log more verbosely <add> if os.getenv("WEBSITE_HOSTNAME"): # In production, don't log as heavily <add> default_level = "WARNING" <add> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", default_level)) <del> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", "ERROR"))
# module: app.backend.app def create_app(): <0> if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): <1> configure_azure_monitor() <2> AioHttpClientInstrumentor().instrument() <3> app = Quart(__name__) <4> app.register_blueprint(bp) <5> app.asgi_app = OpenTelemetryMiddleware(app.asgi_app) <6> <7> # Level should be one of https://docs.python.org/3/library/logging.html#logging-levels <8> logging.basicConfig(level=os.getenv("APP_LOG_LEVEL", "ERROR")) <9> <10> if allowed_origin := os.getenv("ALLOWED_ORIGIN"): <11> app.logger.info("CORS enabled for %s", allowed_origin) <12> cors(app, allow_origin=allowed_origin, allow_methods=["GET", "POST"]) <13> return app <14>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder="static") at: logging basicConfig(*, filename: Optional[StrPath]=..., filemode: str=..., format: str=..., datefmt: Optional[str]=..., style: str=..., level: Optional[_Level]=..., stream: Optional[IO[str]]=..., handlers: Optional[Iterable[Handler]]=...) -> None at: os getenv(key: str, default: _T) -> Union[str, _T] getenv(key: str) -> Optional[str]
locustfile/ChatUser.ask_question
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<17>:<del> "approach": "rrr", <38>:<del> "approach": "rrr",
# module: locustfile class ChatUser(HttpUser): @task def ask_question(self): <0> self.client.get("/") <1> time.sleep(5) <2> self.client.post( <3> "/chat", <4> json={ <5> "history": [ <6> { <7> "user": random.choice( <8> [ <9> "What is included in my Northwind Health Plus plan that is not in standard?", <10> "What does a Product Manager do?", <11> "What happens in a performance review?", <12> "Whats your whistleblower policy?", <13> ] <14> ) <15> } <16> ], <17> "approach": "rrr", <18> "overrides": { <19> "retrieval_mode": "hybrid", <20> "semantic_ranker": True, <21> "semantic_captions": False, <22> "top": 3, <23> "suggest_followup_questions": False, <24> }, <25> }, <26> ) <27> time.sleep(5) <28> self.client.post( <29> "/chat", <30> json={ <31> "history": [ <32> { <33> "user": "What happens in a performance review?", <34> "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", <35> }, <36> {"user": "Does my plan cover eye exams?"}, <37> ], <38> "approach": "rrr", <39> "overrides": { <40> "retrieval_mode": "hybrid", <41> "semantic_</s>
===========below chunk 0=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) ===========unchanged ref 0=========== at: locustfile.ChatUser wait_time = between(5, 20) at: random choice = _inst.choice at: time sleep(secs: float) -> None ===========changed ref 0=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... -
tests.test_app/test_ask_rtr_text
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rtr",
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): <0> response = await client.post( <1> "/ask", <2> json={ <3> "approach": "rtr", <4> "question": "What is the capital of France?", <5> "overrides": {"retrieval_mode": "text"}, <6> }, <7> ) <8> assert response.status_code == 200 <9> result = await response.get_json() <10> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text response = await client.post( "/ask", json={ "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... - ===========changed ref 2=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 3=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
tests.test_app/test_ask_rtr_text_filter
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<4>:<del> "approach": "rtr",
# module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): <0> response = await auth_client.post( <1> "/ask", <2> headers={"Authorization": "Bearer MockToken"}, <3> json={ <4> "approach": "rtr", <5> "question": "What is the capital of France?", <6> "overrides": { <7> "retrieval_mode": "text", <8> "use_oid_security_filter": True, <9> "use_groups_security_filter": True, <10> "exclude_category": "excluded", <11> }, <12> }, <13> ) <14> assert response.status_code == 200 <15> assert ( <16> auth_client.config[app.CONFIG_SEARCH_CLIENT].filter <17> == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" <18> ) <19> result = await response.get_json() <20> snapshot.assert_match(json.dumps(result, indent=4), "result.json") <21>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_filter response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... - ===========changed ref 3=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 4=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
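Note for readers decoding the ground_truth notation used in these records: an entry such as `<4>:<del> "approach": "rtr",` names the numbered statement of main_code that the commit deletes. The following is a minimal, illustrative sketch (not part of this repository) of how such delete edits could be applied to a numbered main_code block, assuming the notation only carries statement indices:

# Illustrative helper (an assumption, not repository code): applies "<n>:<del>"
# ground_truth entries to a main_code block whose statements are numbered "<0> ...".
import re

def apply_delete_edits(main_code: str, ground_truth: str) -> str:
    # Collect the statement indices that the ground_truth marks for deletion.
    deleted = {int(n) for n in re.findall(r"<(\d+)>:<del>", ground_truth)}
    kept_lines = []
    for line in main_code.splitlines():
        match = re.match(r"<(\d+)>\s?", line)
        if match and int(match.group(1)) in deleted:
            continue  # drop the deleted statement
        kept_lines.append(line)
    return "\n".join(kept_lines)

if __name__ == "__main__":
    ground_truth = '<4>:<del> "approach": "rtr",'
    main_code = "\n".join([
        '<3> json={',
        '<4> "approach": "rtr",',
        '<5> "question": "What is the capital of France?",',
    ])
    print(apply_delete_edits(main_code, ground_truth))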
tests.test_app/test_ask_rtr_text_semanticranker
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rtr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_ask_rtr_text_semanticranker(client, snapshot):
<0> response = await client.post(
<1> "/ask",
<2> json={
<3> "approach": "rtr",
<4> "question": "What is the capital of France?",
<5> "overrides": {"retrieval_mode": "text", "semantic_ranker": True},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_semanticranker response = await client.post( "/ask", json={ "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... - ===========changed ref 4=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. 
The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 5=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
tests.test_app/test_ask_rtr_text_semanticcaptions
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rtr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_ask_rtr_text_semanticcaptions(client, snapshot):
<0> response = await client.post(
<1> "/ask",
<2> json={
<3> "approach": "rtr",
<4> "question": "What is the capital of France?",
<5> "overrides": {"retrieval_mode": "text", "semantic_captions": True},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_text_semanticcaptions response = await client.post( "/ask", json={ "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... 
- ===========changed ref 5=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 6=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
tests.test_app/test_ask_rtr_hybrid
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rtr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_ask_rtr_hybrid(client, snapshot):
<0> response = await client.post(
<1> "/ask",
<2> json={
<3> "approach": "rtr",
<4> "question": "What is the capital of France?",
<5> "overrides": {"retrieval_mode": "hybrid"},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_ask_rtr_hybrid response = await client.post( "/ask", json={ "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... 
- ===========changed ref 6=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 7=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
tests.test_app/test_chat_text
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_text(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text"},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: tests.test_app.test_chat_text_filter response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - 
async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... - ===========changed ref 8=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): self.client.get("/") time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": random.choice( [ "What is included in my Northwind Health Plus plan that is not in standard?", "What does a Product Manager do?", "What happens in a performance review?", "Whats your whistleblower policy?", ] ) } ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, }, ) time.sleep(5) self.client.post( "/chat", json={ "history": [ { "user": "What happens in a performance review?", "bot": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", }, {"user": "Does my plan cover eye exams?"}, ], - "approach": "rrr", "overrides": { "retrieval_mode": "hybrid", "semantic_ranker": True, "semantic_captions": False, "top": 3, "suggest_followup_questions": False, }, </s> ===========changed ref 9=========== # module: locustfile class ChatUser(HttpUser): @task def ask_question(self): # offset: 1 <s>_captions": False, "top": 3, "suggest_followup_questions": False, }, }, )
tests.test_app/test_chat_text_filter
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<4>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_text_filter(auth_client, snapshot):
<0> response = await auth_client.post(
<1> "/chat",
<2> headers={"Authorization": "Bearer MockToken"},
<3> json={
<4> "approach": "rrr",
<5> "history": [{"user": "What is the capital of France?"}],
<6> "overrides": {
<7> "retrieval_mode": "text",
<8> "use_oid_security_filter": True,
<9> "use_groups_security_filter": True,
<10> "exclude_category": "excluded",
<11> },
<12> },
<13> )
<14> assert response.status_code == 200
<15> assert (
<16> auth_client.config[app.CONFIG_SEARCH_CLIENT].filter
<17> == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))"
<18> )
<19> result = await response.get_json()
<20> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<21>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_text_filter response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app 
@pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... -
tests.test_app/test_chat_text_semanticranker
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_text_semanticranker(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text", "semantic_ranker": True},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_text_semanticcaptions response = await client.post( "/chat", json={ "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", 
headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... -
tests.test_app/test_chat_text_semanticcaptions
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_text_semanticcaptions(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text", "semantic_captions": True},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_prompt_template response = await client.post( "/chat", json={ "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await 
client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... -
tests.test_app/test_chat_prompt_template
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_prompt_template(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_prompt_template_concat response = await client.post( "/chat", json={ "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def 
test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_filter(auth_client, snapshot): response = await auth_client.post( "/ask", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_prompt_template_concat
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_prompt_template_concat(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_hybrid response = await client.post( "/chat", json={ "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "hybrid"}, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await 
client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_hybrid
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
 @pytest.mark.asyncio
 async def test_chat_hybrid(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "hybrid"},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: tests.test_app.test_chat_vector response = await client.post( "/chat", json={ "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "vector"}, }, ) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = 
await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
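All of the test records for commit #738 make the same change: the client no longer names an approach, so the "approach" key disappears from the request payload. A minimal sketch of the post-change request shape, assuming an async test client like the fixtures used in these tests:

# Sketch only: mirrors the payload used by the updated tests above.
# The backend now chooses the approach itself, so the body carries only
# the conversation history and optional overrides.
payload = {
    "history": [{"user": "What is the capital of France?"}],
    "overrides": {"retrieval_mode": "hybrid"},  # also "text" or "vector"
}
response = await client.post("/chat", json=payload)
assert response.status_code == 200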
tests.test_app/test_chat_vector
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_vector(client, snapshot):
<0> response = await client.post(
<1> "/chat",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "vector"},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_json()
<10> snapshot.assert_match(json.dumps(result, indent=4), "result.json")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, 
indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 11=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 12=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_filter(auth_client, snapshot): response = await auth_client.post( "/chat", headers={"Authorization": "Bearer MockToken"}, json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) assert response.status_code == 200 assert ( auth_client.config[app.CONFIG_SEARCH_CLIENT].filter == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))" ) result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
tests.test_app/test_chat_stream_text
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<3>:<del> "approach": "rrr",
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_stream_text(client, snapshot):
<0> response = await client.post(
<1> "/chat_stream",
<2> json={
<3> "approach": "rrr",
<4> "history": [{"user": "What is the capital of France?"}],
<5> "overrides": {"retrieval_mode": "text"},
<6> },
<7> )
<8> assert response.status_code == 200
<9> result = await response.get_data()
<10> snapshot.assert_match(result, "result.jsonlines")
<11>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) at: tests.test_app.test_chat_stream_text_filter response = await auth_client.post( "/chat_stream", headers={"Authorization": "Bearer MockToken"}, json={ "history": [{"user": "What is the capital of France?"}], "overrides": { "retrieval_mode": "text", "use_oid_security_filter": True, "use_groups_security_filter": True, "exclude_category": "excluded", }, }, ) ===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_with_unknown_approach(client): - response = await client.post("/chat_stream", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "vector"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of 
France?"}], "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 11=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 12=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
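The streaming tests read the raw body with response.get_data() and snapshot it as "result.jsonlines", i.e. one JSON document per line. A small helper of this shape (not from the repository, just an illustration) can turn that body back into event dicts:

import json

def parse_ndjson(raw: bytes) -> list[dict]:
    # Each non-empty line of the /chat_stream body is one JSON event.
    return [json.loads(line) for line in raw.decode("utf-8").splitlines() if line.strip()]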
tests.test_app/test_chat_stream_text_filter
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<4>:<del> "approach": "rrr",
# module: tests.test_app
@pytest.mark.asyncio
async def test_chat_stream_text_filter(auth_client, snapshot):
<0> response = await auth_client.post(
<1> "/chat_stream",
<2> headers={"Authorization": "Bearer MockToken"},
<3> json={
<4> "approach": "rrr",
<5> "history": [{"user": "What is the capital of France?"}],
<6> "overrides": {
<7> "retrieval_mode": "text",
<8> "use_oid_security_filter": True,
<9> "use_groups_security_filter": True,
<10> "exclude_category": "excluded",
<11> },
<12> },
<13> )
<14> assert response.status_code == 200
<15> assert (
<16> auth_client.config[app.CONFIG_SEARCH_CLIENT].filter
<17> == "category ne 'excluded' and (oids/any(g:search.in(g, 'OID_X')) or groups/any(g:search.in(g, 'GROUP_Y, GROUP_Z')))"
<18> )
<19> result = await response.get_data()
<20> snapshot.assert_match(result, "result.jsonlines")
<21>
===========changed ref 0=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_with_unknown_approach(client): - response = await client.post("/chat_stream", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 1=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_stream_text(client, snapshot): response = await client.post( "/chat_stream", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 2=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "vector"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 3=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 4=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template_concat(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": ">>> Meow like a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 5=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_prompt_template(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "prompt_template": "You are a cat."}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticcaptions(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text_semanticranker(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": 
"text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 11=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 12=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 13=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticranker(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_ranker": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<5>:<del> approach = request_json["approach"]
<7>:<add> impl = current_app.config[CONFIG_ASK_APPROACH] <del> impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach)
<8>:<del> if not impl:
<9>:<del> return jsonify({"error": "unknown approach"}), 400
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
async def ask():
<0> if not request.is_json:
<1> return jsonify({"error": "request must be json"}), 415
<2> request_json = await request.get_json()
<3> auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<4> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers)
<5> approach = request_json["approach"]
<6> try:
<7> impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach)
<8> if not impl:
<9> return jsonify({"error": "unknown approach"}), 400
<10> # Workaround for: https://github.com/openai/openai-python/issues/371
<11> async with aiohttp.ClientSession() as s:
<12> openai.aiosession.set(s)
<13> r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims)
<14> return jsonify(r)
<15> except Exception as e:
<16> logging.exception("Exception in /ask")
<17> return jsonify({"error": str(e)}), 500
<18>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_ASK_APPROACH = "ask_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. ===========changed ref 0=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" + CONFIG_ASK_APPROACH = "ask_approach" - CONFIG_ASK_APPROACHES = "ask_approaches" + CONFIG_CHAT_APPROACH = "chat_approach" - CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 1=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... 
- ===========changed ref 2=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 3=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 4=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_with_unknown_approach(client): - response = await client.post("/chat_stream", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 5=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_stream_text(client, snapshot): response = await client.post( "/chat_stream", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "vector"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_text(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_hybrid(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 11=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text_semanticcaptions(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", 
"question": "What is the capital of France?", "overrides": {"retrieval_mode": "text", "semantic_captions": True}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<5>:<del> approach = request_json["approach"]
<7>:<add> impl = current_app.config[CONFIG_CHAT_APPROACH] <del> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach)
<8>:<del> if not impl:
<9>:<del> return jsonify({"error": "unknown approach"}), 400
# module: app.backend.app
@bp.route("/chat", methods=["POST"])
async def chat():
<0> if not request.is_json:
<1> return jsonify({"error": "request must be json"}), 415
<2> request_json = await request.get_json()
<3> auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<4> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers)
<5> approach = request_json["approach"]
<6> try:
<7> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach)
<8> if not impl:
<9> return jsonify({"error": "unknown approach"}), 400
<10> # Workaround for: https://github.com/openai/openai-python/issues/371
<11> async with aiohttp.ClientSession() as s:
<12> openai.aiosession.set(s)
<13> r = await impl.run_without_streaming(
<14> request_json["history"], request_json.get("overrides", {}), auth_claims
<15> )
<16> return jsonify(r)
<17> except Exception as e:
<18> logging.exception("Exception in /chat")
<19> return jsonify({"error": str(e)}), 500
<20>
===========unchanged ref 0=========== at: _contextvars.ContextVar set(value, /) at: aiohttp.client ClientSession(base_url: Optional[StrOrURL]=None, *, connector: Optional[BaseConnector]=None, loop: Optional[asyncio.AbstractEventLoop]=None, cookies: Optional[LooseCookies]=None, headers: Optional[LooseHeaders]=None, skip_auto_headers: Optional[Iterable[str]]=None, auth: Optional[BasicAuth]=None, json_serialize: JSONEncoder=json.dumps, request_class: Type[ClientRequest]=ClientRequest, response_class: Type[ClientResponse]=ClientResponse, ws_response_class: Type[ClientWebSocketResponse]=ClientWebSocketResponse, version: HttpVersion=http.HttpVersion11, cookie_jar: Optional[AbstractCookieJar]=None, connector_owner: bool=True, raise_for_status: bool=False, read_timeout: Union[float, object]=sentinel, conn_timeout: Optional[float]=None, timeout: Union[object, ClientTimeout]=sentinel, auto_decompress: bool=True, trust_env: bool=False, requote_redirect_url: bool=True, trace_configs: Optional[List[TraceConfig]]=None, read_bufsize: int=2**16, fallback_charset_resolver: _CharsetResolver=( _default_fallback_charset_resolver )) at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") at: app.backend.app.chat request_json = await request.get_json() ===========unchanged ref 1=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: openai aiosession: ContextVar[Optional["ClientSession"]] = ContextVar( "aiohttp-session", default=None ) # Acts as a global aiohttp ClientSession that reuses connections. 
at: typing AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2) ===========changed ref 0=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" + CONFIG_ASK_APPROACH = "ask_approach" - CONFIG_ASK_APPROACHES = "ask_approaches" + CONFIG_CHAT_APPROACH = "chat_approach" - CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 1=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() auth_helper = current_app.config[CONFIG_AUTH_CLIENT] auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) - approach = request_json["approach"] try: + impl = current_app.config[CONFIG_ASK_APPROACH] - impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) - if not impl: - return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 2=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... - ===========changed ref 3=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 4=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 5=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_with_unknown_approach(client): - response = await client.post("/chat_stream", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 6=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_stream_text(client, snapshot): response = await client.post( "/chat_stream", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is 
the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
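Taken together, the /ask, /chat, and /chat_stream records replace the per-request approach dictionary with a single object stored in app config. A minimal Quart-style sketch of the resulting pattern; the StubChatApproach class is a stand-in added here only to keep the sketch self-contained, and the names deliberately shadow the real module for illustration:

from quart import Blueprint, current_app, jsonify, request

bp = Blueprint("routes", __name__)
CONFIG_CHAT_APPROACH = "chat_approach"  # one fixed key instead of the old "chat_approaches" dict

class StubChatApproach:
    # Stand-in for ChatReadRetrieveReadApproach, just to make the sketch runnable.
    async def run_without_streaming(self, history, overrides, auth_claims):
        return {"answer": "stub", "history": history, "overrides": overrides}

@bp.before_app_serving
async def setup_clients():
    current_app.config[CONFIG_CHAT_APPROACH] = StubChatApproach()

@bp.route("/chat", methods=["POST"])
async def chat():
    request_json = await request.get_json()
    impl = current_app.config[CONFIG_CHAT_APPROACH]  # no client-chosen approach any more
    result = await impl.run_without_streaming(
        request_json["history"], request_json.get("overrides", {}), {}
    )
    return jsonify(result)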
app.backend.app/chat_stream
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
<5>:<del> approach = request_json["approach"]
<7>:<add> impl = current_app.config[CONFIG_CHAT_APPROACH] <del> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach)
<8>:<del> if not impl:
<9>:<del> return jsonify({"error": "unknown approach"}), 400
# module: app.backend.app
@bp.route("/chat_stream", methods=["POST"])
async def chat_stream():
<0> if not request.is_json:
<1> return jsonify({"error": "request must be json"}), 415
<2> request_json = await request.get_json()
<3> auth_helper = current_app.config[CONFIG_AUTH_CLIENT]
<4> auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers)
<5> approach = request_json["approach"]
<6> try:
<7> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach)
<8> if not impl:
<9> return jsonify({"error": "unknown approach"}), 400
<10> response_generator = impl.run_with_streaming(
<11> request_json["history"], request_json.get("overrides", {}), auth_claims
<12> )
<13> response = await make_response(format_as_ndjson(response_generator))
<14> response.timeout = None # type: ignore
<15> return response
<16> except Exception as e:
<17> logging.exception("Exception in /chat")
<18> return jsonify({"error": str(e)}), 500
<19>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_AUTH_CLIENT = "auth_client" bp = Blueprint("routes", __name__, static_folder="static") format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None] at: app.backend.app.chat_stream request_json = await request.get_json() auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" + CONFIG_ASK_APPROACH = "ask_approach" - CONFIG_ASK_APPROACHES = "ask_approaches" + CONFIG_CHAT_APPROACH = "chat_approach" - CONFIG_CHAT_APPROACHES = "chat_approaches" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") ===========changed ref 1=========== # module: app.backend.app @bp.route("/chat", methods=["POST"]) async def chat(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() auth_helper = current_app.config[CONFIG_AUTH_CLIENT] auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) - approach = request_json["approach"] try: + impl = current_app.config[CONFIG_CHAT_APPROACH] - impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) - if not impl: - return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await impl.run_without_streaming( request_json["history"], request_json.get("overrides", {}), auth_claims ) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500 ===========changed ref 2=========== # module: app.backend.app @bp.route("/ask", methods=["POST"]) async def ask(): if not request.is_json: return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() auth_helper = current_app.config[CONFIG_AUTH_CLIENT] auth_claims = await auth_helper.get_auth_claims_if_enabled(request.headers) - approach = request_json["approach"] try: + impl = current_app.config[CONFIG_ASK_APPROACH] - impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) - if not impl: - return jsonify({"error": "unknown approach"}), 400 # Workaround for: https://github.com/openai/openai-python/issues/371 async with aiohttp.ClientSession() as s: openai.aiosession.set(s) r = await impl.run(request_json["question"], request_json.get("overrides") or {}, auth_claims) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 3=========== # module: app.backend.approaches.approach - class AskApproach(Approach): - @abstractmethod - async def run(self, q: str, overrides: dict[str, Any], auth_claims: dict[str, Any]) -> dict[str, Any]: - ... 
- ===========changed ref 4=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_with_unknown_approach(client): - response = await client.post("/chat", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 5=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_ask_with_unknown_approach(client): - response = await client.post("/ask", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 6=========== # module: tests.test_app - @pytest.mark.asyncio - async def test_chat_stream_with_unknown_approach(client): - response = await client.post("/chat_stream", json={"approach": "test"}) - assert response.status_code == 400 - ===========changed ref 7=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_stream_text(client, snapshot): response = await client.post( "/chat_stream", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_data() snapshot.assert_match(result, "result.jsonlines") ===========changed ref 8=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_text(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "text"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 9=========== # module: tests.test_app @pytest.mark.asyncio async def test_ask_rtr_hybrid(client, snapshot): response = await client.post( "/ask", json={ - "approach": "rtr", "question": "What is the capital of France?", "overrides": {"retrieval_mode": "hybrid"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json") ===========changed ref 10=========== # module: tests.test_app @pytest.mark.asyncio async def test_chat_vector(client, snapshot): response = await client.post( "/chat", json={ - "approach": "rrr", "history": [{"user": "What is the capital of France?"}], "overrides": {"retrieval_mode": "vector"}, }, ) assert response.status_code == 200 result = await response.get_json() snapshot.assert_match(json.dumps(result, indent=4), "result.json")
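The /chat_stream handler wraps the approach's async generator in format_as_ndjson before handing it to make_response. The helper's body is not shown in this record; a plausible sketch matching the signature listed in the unchanged refs (AsyncGenerator[dict, None] in, AsyncGenerator[str, None] out) is:

import json
from typing import AsyncGenerator

async def format_as_ndjson(r: AsyncGenerator[dict, None]) -> AsyncGenerator[str, None]:
    # Serialize each event dict as a newline-terminated JSON line, the format
    # the "result.jsonlines" snapshots consume.
    async for event in r:
        yield json.dumps(event, ensure_ascii=False) + "\n"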
app.backend.app/setup_clients
Modified
Azure-Samples~azure-search-openai-demo
f10946742b93c8e2e16483d0c059f53f6b4ff615
Remove broken langchain-based approaches (#738)
# module: app.backend.app
@bp.before_app_serving
async def setup_clients():
<0> # Replace these with your own values, either in environment variables or directly here
<1> AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"]
<2> AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"]
<3> AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"]
<4> AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"]
<5> # Shared by all OpenAI deployments
<6> OPENAI_HOST = os.getenv("OPENAI_HOST", "azure")
<7> OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"]
<8> OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002")
<9> # Used with Azure OpenAI deployments
<10> AZURE_OPENAI_SERVICE = os.getenv("AZURE_OPENAI_SERVICE")
<11> AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.getenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT")
<12> AZURE_OPENAI_EMB_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMB_DEPLOYMENT")
<13> # Used only with non-Azure OpenAI deployments
<14> OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
<15> OPENAI_ORGANIZATION = os.getenv("OPENAI_ORGANIZATION")
<16> AZURE_USE_AUTHENTICATION = os.getenv("AZURE_USE_AUTHENTICATION", "").lower() == "true"
<17> AZURE_SERVER_APP_ID = os.getenv("AZURE_SERVER_APP_ID")
<18> AZURE_SERVER_APP_SECRET = os.getenv("AZURE_SERVER_APP_SECRET")
<19> AZURE_CLIENT_APP_ID = os.getenv("AZURE_CLIENT_APP_ID")</s>
===========below chunk 0=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 1 TOKEN_CACHE_PATH = os.getenv("TOKEN_CACHE_PATH") KB_FIELDS_CONTENT = os.getenv("KB_FIELDS_CONTENT", "content") KB_FIELDS_SOURCEPAGE = os.getenv("KB_FIELDS_SOURCEPAGE", "sourcepage") # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the # keys for each service # If you encounter a blocking error during a DefaultAzureCredential resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True) # Set up authentication helper auth_helper = AuthenticationHelper( use_authentication=AZURE_USE_AUTHENTICATION, server_app_id=AZURE_SERVER_APP_ID, server_app_secret=AZURE_SERVER_APP_SECRET, client_app_id=AZURE_CLIENT_APP_ID, tenant_id=AZURE_TENANT_ID, token_cache_path=TOKEN_CACHE_PATH, ) # Set up clients for Cognitive Search and Storage search_client = SearchClient( endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", index_name=AZURE_SEARCH_INDEX, credential=azure_credential, ) blob_client = BlobServiceClient( account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential ) blob_container_client = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api</s> ===========below chunk 1=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 2 <s>_CONTAINER) # Used by the OpenAI SDK if OPENAI_HOST == "azure": openai.api_type = "azure_ad" openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" openai.api_version = "2023-07-01-preview" openai_token = await azure_credential.get_token("https://cognitiveservices.azure.com/.default") openai.api_key = openai_token.token # Store on app.config for later use inside requests current_app.config[CONFIG_OPENAI_TOKEN] = openai_token else: openai.api_type = "openai" openai.api_key = OPENAI_API_KEY openai.organization = OPENAI_ORGANIZATION current_app.config[CONFIG_CREDENTIAL] = azure_credential current_app.config[CONFIG_SEARCH_CLIENT] = search_client current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client current_app.config[CONFIG_AUTH_CLIENT] = auth_helper # Various approaches to integrate GPT and external knowledge, most applications will use a single one of these patterns # or some derivative, here we include several for exploration purposes current_app.config[CONFIG_ASK_APPROACHES] = { "rtr": RetrieveThenReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, )</s> ===========below chunk 2=========== # module: app.backend.app @bp.before_app_serving async def setup_clients(): # offset: 3 <s> "rrr": ReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ), "rda": ReadDecomposeAsk( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, 
), } current_app.config[CONFIG_CHAT_APPROACHES] = { "rrr": ChatReadRetrieveReadApproach( search_client, OPENAI_HOST, AZURE_OPENAI_CHATGPT_DEPLOYMENT, OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, OPENAI_EMB_MODEL, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT, ) } ===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" CONFIG_ASK_APPROACH = "ask_approach" CONFIG_CHAT_APPROACH = "chat_approach" CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_SEARCH_CLIENT = "search_client" bp = Blueprint("routes", __name__, static_folder="static") at: app.backend.app.setup_clients AZURE_STORAGE_ACCOUNT = os.environ["AZURE_STORAGE_ACCOUNT"] AZURE_STORAGE_CONTAINER = os.environ["AZURE_STORAGE_CONTAINER"] AZURE_SEARCH_SERVICE = os.environ["AZURE_SEARCH_SERVICE"] AZURE_SEARCH_INDEX = os.environ["AZURE_SEARCH_INDEX"] OPENAI_HOST = os.getenv("OPENAI_HOST", "azure") OPENAI_CHATGPT_MODEL = os.environ["AZURE_OPENAI_CHATGPT_MODEL"] OPENAI_EMB_MODEL = os.getenv("AZURE_OPENAI_EMB_MODEL_NAME", "text-embedding-ada-002") at: approaches.chatreadretrieveread ChatReadRetrieveReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str) at: approaches.retrievethenread RetrieveThenReadApproach(search_client: SearchClient, openai_host: str, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, embedding_model: str, sourcepage_field: str, content_field: str)
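setup_clients configures the openai SDK differently per OPENAI_HOST; for the "azure" host it authenticates with an AAD token rather than an API key. Condensed into a standalone sketch, assuming the pre-1.0 openai SDK used in this snapshot and the async azure-identity credential:

import openai
from azure.identity.aio import DefaultAzureCredential

async def configure_azure_openai(service_name: str) -> None:
    # Fetch an AAD token for the Cognitive Services scope and hand it to the
    # openai module as the API key, mirroring the setup shown above.
    credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True)
    token = await credential.get_token("https://cognitiveservices.azure.com/.default")
    openai.api_type = "azure_ad"
    openai.api_base = f"https://{service_name}.openai.azure.com"
    openai.api_version = "2023-07-01-preview"
    openai.api_key = token.token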
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
969272dd68d2bde4a3f4172f1bc330d7cfe3ba3f
Normalize text in messages to reduce token length (#688)
<6>:<del> user_content = user_conv
<12>:<add> if message_builder.token_length > max_tokens: <add> break
<16>:<del> if message_builder.token_length > max_tokens:
<17>:<del> break
<19>:<add> return message_builder.messages <del> messages = message_builder.messages
<20>:<del> return messages
# module: app.backend.approaches.chatreadretrieveread
class ChatReadRetrieveReadApproach(Approach):
    def get_messages_from_history(
        self,
        system_prompt: str,
        model_id: str,
        history: list[dict[str, str]],
+       user_content: str,
-       user_conv: str,
        few_shots=[],
        max_tokens: int = 4096,
    ) -> list:
<0> message_builder = MessageBuilder(system_prompt, model_id)
<1>
<2> # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
<3> for shot in few_shots:
<4> message_builder.append_message(shot.get("role"), shot.get("content"))
<5>
<6> user_content = user_conv
<7> append_index = len(few_shots) + 1
<8>
<9> message_builder.append_message(self.USER, user_content, index=append_index)
<10>
<11> for h in reversed(history[:-1]):
<12> if bot_msg := h.get("bot"):
<13> message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index)
<14> if user_msg := h.get("user"):
<15> message_builder.append_message(self.USER, user_msg, index=append_index)
<16> if message_builder.token_length > max_tokens:
<17> break
<18>
<19> messages = message_builder.messages
<20> return messages
<21>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" NO_RESPONSE = "0" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. You have access to Azure Cognitive Search index with 100's of documents. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. 
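The test refs above capture the intent of commit #688: MessageBuilder normalizes message content so that decomposed Unicode such as "a\u0301" is stored as the composed "á" before tokens are counted. The repository's normalize_content is not shown in this record, but the core of such a step is available in the standard library:

import unicodedata

def normalize_content(content: str) -> str:
    # NFC composition folds "a" + combining acute accent into the single code
    # point "á", which generally tokenizes to fewer tokens.
    return unicodedata.normalize("NFC", content)

assert normalize_content("a\u0301") == "á"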
""" query_prompt_few_shots = [ {"role": USER, "content": "What are my health plans?"}, {"role": ASSISTANT, "content": "Show available health plans"}, {"role": USER, "content": "does my plan cover cardio?"}, {"role": ASSISTANT, "content": "Health plan cardio coverage"}, ] at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{"role": "system", "content": self.normalize_content(system_content)}] self.token_length = num_tokens_from_messages(self.messages[-1], self.model) at: core.messagebuilder.MessageBuilder.append_message self.token_length += num_tokens_from_messages(self.messages[index], self.model) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"} + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 4 + ===========changed ref 1=========== # module: tests.test_messagebuilder + def test_messagebuilder_unicode_append(): + builder = MessageBuilder("a\u0301", "gpt-35-turbo") + builder.append_message("user", "a\u0301") + assert builder.messages == [ + # 1 token, 1 token, 1 token, 1 token + {"role": "system", "content": "á"}, + # 1 token, 1 token, 1 token, 1 token + {"role": "user", "content": "á"}, + ] + assert builder.model == "gpt-35-turbo" + assert builder.token_length == 8 +