Dataset columns:

  path            string (lengths 9 to 117)
  type            string (2 values)
  project         string (10 values)
  commit_hash     string (length 40)
  commit_message  string (lengths 1 to 137)
  ground_truth    string (lengths 0 to 2.74k)
  main_code       string (lengths 102 to 3.37k)
  context         string (lengths 0 to 14.7k)
pywhat/__dir__
Modified
bee-san~pyWhat
2f4e0ccb1d6b5508dc6ba61e40fb3dfe3b413ba0
Merge branch 'main' into flake8
<0>:<add> return __all__ + ["__version__"] <del> return _contents + ["__version__"]
# module: pywhat def __dir__(): <0> return _contents + ["__version__"] <1>
===========changed ref 0=========== # module: pywhat __version__ = "4.3.1" tags = AvailableTags().get_tags() pywhat_tags = tags # left for backward compatibility purposes + __all__ = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - _contents = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - - __all__ = _contents del AvailableTags, filter ===========changed ref 1=========== # module: pywhat.helper class Keys(Enum): + def MATCHED(match): + return match["Matched"] + ===========changed ref 2=========== # module: pywhat.helper class Keys(Enum): + def NAME(match): + return match["Regex Pattern"]["Name"] + ===========changed ref 3=========== # module: pywhat.helper class Keys(Enum): + def RARITY(match): + return match["Regex Pattern"]["Rarity"] + ===========changed ref 4=========== # module: pywhat.helper class Keys(Enum): - NAME = lambda match: match["Regex Pattern"]["Name"] - RARITY = lambda match: match["Regex Pattern"]["Rarity"] - MATCHED = lambda match: match["Matched"] NONE = auto() ===========changed ref 5=========== # module: pywhat.filter class Filter(Mapping): def __contains__(self, item): try: return ( self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] and set(item["Tags"]) & self["Tags"] and not set(item["Tags"]) & self["ExcludeTags"] ) + except KeyError: - except: return False
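The changed refs above replace lambda assignments on the Keys enum with plain def functions (and rename _contents to __all__). A standalone sketch, not the pywhat module itself, of why the two enum forms behave the same: callables defined in an Enum body are kept as ordinary attributes/methods rather than turned into members, so only NONE is enumerated either way.

from enum import Enum, auto

class Keys(Enum):
    NONE = auto()  # the only real enum member

    def NAME(match):  # a function: stored as a plain attribute, not a member
        return match["Regex Pattern"]["Name"]

print(list(Keys))                                     # [<Keys.NONE: 1>]
print(Keys.NAME({"Regex Pattern": {"Name": "URL"}}))  # URL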
pywhat.what/main
Modified
bee-san~pyWhat
2f4e0ccb1d6b5508dc6ba61e40fb3dfe3b413ba0
Merge branch 'main' into flake8
<s> pywhat.", ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): <0> """ <1> pyWhat - Identify what something is. <2> <3> Made by Bee https://twitter.com/bee_sec_san <4> <5> https://github.com/bee-san <6> <7> Filtration: <8> <9> --rarity min:max <10> <11> Rarity is how unlikely something is to be a false-positive. The higher the number, the more unlikely. <12> <13> Only print entries with rarity in range [min,max]. min and max can be omitted. <14> <15> Note: PyWhat by default has a rarity of 0.1. To see all matches, with many potential false positives use `0:`. <16> <17> --include list <18> <19> Only include entries containing at least one tag in a list. List is a comma separated list. <20> <21> --exclude list <22> <23> Exclude specified tags. List is a comma separated list. <24> <25> Sorting: <26> <27> --key key_name <28> <29> Sort by the given key. <30> <31> --reverse <32> <33> Sort in reverse order. <34> <35> Available keys: <36> <37> name - Sort by the name of regex pattern <38> <39> rarity - Sort by rarity <40> <41> matched - Sort by a matched string <42> <43> none - No sorting is done (the default) <44> <45> Exporting: <46> </s>
===========below chunk 0=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 1 Return results in json format. Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because the boundaryless mode is enabled for regexes with a rarity of 0.1 and higher. Since boundaryless mode may produce a lot of false-positive matches, it is possible to disable it, either fully or partially. '--disable-boundaryless' flag can be used to fully disable this mode. In addition, '-br', '-bi', and '-be' options can be used to tweak which regexes should be in boundaryless mode. Refer to the Filtration section for more information. Formatting the output: --format format_str format_str can be equal to: pretty - Output data in the table json - Ouput data in json format CUSTOM_STRING - Print data in the way you want. For every match CUSTOM_STRING will be printed and '%x' (See below for possible x values) will be substituted with a match value. For example: pywhat --format '%m - %n' 'google.com htb{flag}' will print: htb{flag} - HackTheBox Flag Format google.com - Uniform Resource Locator (URL) Possible '%x' values: %m - matched text %n - name of regex %d - description</s> ===========below chunk 1=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 2 <s> %m - matched text %n - name of regex %d - description (will not output if absent) %e - exploit (will not ouput if absent) %r - rarity %l - link (will not ouput if absent) %t - tags (in 'tag1, tag2 ...' format) If you want to print '%' or '\' character - escape it: '\%', '\\'. Examples: * what 'HTB{this is a flag}' * what '0x52908400098527886E0F7030069857D2E4169EE7' * what -- '52.6169586, -1.9779857' * what --rarity 0.6: '[email protected]' * what --rarity 0: --include "credentials, username, password" --exclude "aws, credentials" 'James:SecretPassword' * what -br 0.6: -be URL '[email protected]' Your text must either be in quotation marks, or use the POSIX standard of "--" to mean "anything after -- is textual input". pyWhat can also search files or even a whole directory with recursion</s> ===========below chunk 2=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 3 <s> * what 'secret.txt' * what 'this/is/a/path' """ if kwargs["text_input"] is None: sys.exit("Text input expected. 
Run 'pywhat --help' for help") dist = Distribution( create_filter(kwargs["rarity"], kwargs["include"], kwargs["exclude"]) ) if kwargs["disable_boundaryless"]: boundaryless = Filter({"Tags": []}) # use empty filter else: boundaryless = create_filter( kwargs["boundaryless_rarity"], kwargs["boundaryless_include"], kwargs["boundaryless_exclude"], ) what_obj = What_Object(dist) if kwargs["key"] is None: key = Keys.NONE else: try: key = str_to_key(kwargs["key"]) except ValueError: print("Invalid key") sys.exit(1) identified_output = what_obj.what_is_this( kwargs["text_input"], kwargs["only_text"], key, kwargs["reverse"], boundaryless, kwargs["include_filenames"], ) p = printer.Printing() </s> ===========below chunk 3=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 4 <s> if kwargs["json"] or str(kwargs["format"]).strip() == "json": p.print_json(identified_output) elif str(kwargs["format"]).strip() == "pretty": p.pretty_print(identified_output, kwargs["text_input"], kwargs["print_tags"]) elif kwargs["format"] is not None: p.format_print(identified_output, kwargs["format"]) else: p.print_raw(identified_output, kwargs["text_input"], kwargs["print_tags"])
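The help text above documents the CLI flags; the same filtering is available from the library. A short usage sketch based only on the signatures visible in these refs (Filter, Distribution, and the dist keyword of Identifier.identify); details may vary between pyWhat versions.

from pywhat import Distribution, Filter, Identifier

# Mirror `--rarity 0.6:` from the CLI: keep matches with rarity >= 0.6.
dist = Distribution(Filter({"MinRarity": 0.6}))
matches = Identifier().identify("[email protected]", dist=dist)
print(matches["Regexes"])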
tests.test_filtration/test_distribution7
Modified
bee-san~pyWhat
2f4e0ccb1d6b5508dc6ba61e40fb3dfe3b413ba0
Merge branch 'main' into flake8
<1>:<add> Distribution({"Tags": "Media", "MinRarity": 0.7}) <del> dist = Distribution({"Tags": "Media", "MinRarity": 0.7})
# module: tests.test_filtration def test_distribution7(): <0> with pytest.raises(InvalidTag): <1> dist = Distribution({"Tags": "Media", "MinRarity": 0.7}) <2>
===========unchanged ref 0=========== at: _pytest.python_api raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], func: Callable[..., Any], *args: Any, **kwargs: Any) -> _pytest._code.ExceptionInfo[E] raises(expected_exception: Union[Type[E], Tuple[Type[E], ...]], *, match: Optional[Union[str, Pattern[str]]]=...) -> "RaisesContext[E]" at: pywhat.filter Distribution(filter: Optional[Filter]=None) at: pywhat.helper InvalidTag(*args: object) ===========changed ref 0=========== # module: pywhat.helper class Keys(Enum): + def MATCHED(match): + return match["Matched"] + ===========changed ref 1=========== # module: pywhat.helper class Keys(Enum): + def NAME(match): + return match["Regex Pattern"]["Name"] + ===========changed ref 2=========== # module: pywhat.helper class Keys(Enum): + def RARITY(match): + return match["Regex Pattern"]["Rarity"] + ===========changed ref 3=========== # module: pywhat def __dir__(): + return __all__ + ["__version__"] - return _contents + ["__version__"] ===========changed ref 4=========== # module: pywhat.helper class Keys(Enum): - NAME = lambda match: match["Regex Pattern"]["Name"] - RARITY = lambda match: match["Regex Pattern"]["Rarity"] - MATCHED = lambda match: match["Matched"] NONE = auto() ===========changed ref 5=========== # module: pywhat.filter class Filter(Mapping): def __contains__(self, item): try: return ( self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] and set(item["Tags"]) & self["Tags"] and not set(item["Tags"]) & self["ExcludeTags"] ) + except KeyError: - except: return False ===========changed ref 6=========== # module: pywhat __version__ = "4.3.1" tags = AvailableTags().get_tags() pywhat_tags = tags # left for backward compatibility purposes + __all__ = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - _contents = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - - __all__ = _contents del AvailableTags, filter ===========changed ref 7=========== <s> pywhat.", ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): """ pyWhat - Identify what something is. Made by Bee https://twitter.com/bee_sec_san https://github.com/bee-san Filtration: --rarity min:max Rarity is how unlikely something is to be a false-positive. The higher the number, the more unlikely. Only print entries with rarity in range [min,max]. min and max can be omitted. Note: PyWhat by default has a rarity of 0.1. To see all matches, with many potential false positives use `0:`. --include list Only include entries containing at least one tag in a list. List is a comma separated list. --exclude list Exclude specified tags. List is a comma separated list. Sorting: --key key_name Sort by the given key. --reverse Sort in reverse order. Available keys: name - Sort by the name of regex pattern rarity - Sort by rarity matched - Sort by a matched string none - No sorting is done (the default) Exporting: --json Return results in json format. 
Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because</s> ===========changed ref 8=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 1 <s> Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because the boundaryless mode is enabled for regexes with a rarity of 0.1 and higher. Since boundaryless mode may produce a lot of false-positive matches, it is possible to disable it, either fully or partially. '--disable-boundaryless' flag can be used to fully disable this mode. In addition, '-br', '-bi', and '-be' options can be used to tweak which regexes should be in boundaryless mode. Refer to the Filtration section for more information. Formatting the output: --format format_str format_str can be equal to: pretty - Output data in the table json - Ouput data in json format CUSTOM_STRING - Print data in the way you want. For every match CUSTOM_STRING will be printed and '%x' (See below for possible x values) will be substituted with a match value. For example: pywhat --format '%m - %n' 'google.com htb{flag}' will print: htb{flag} - HackTheBox Flag Format google.com - Uniform Resource Locator (URL) Possible '%x' values: %m - matched</s>
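The ground-truth edit for test_distribution7 only drops the unused dist binding (flake8's unused-variable check); pytest.raises just needs the raising call to happen inside the with block. A minimal standalone illustration:

import pytest

def reject(tag):
    raise ValueError(f"invalid tag: {tag}")

def test_reject():
    with pytest.raises(ValueError):
        reject("Media")  # no binding needed; the raised exception is what is asserted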
tests.test_identifier/test_only_text
Modified
bee-san~pyWhat
2f4e0ccb1d6b5508dc6ba61e40fb3dfe3b413ba0
Merge branch 'main' into flake8
<1>:<add> assert out["Regexes"] is None <del> assert None == out["Regexes"]
# module: tests.test_identifier def test_only_text(): <0> out = r.identify("fixtures/file") <1> assert None == out["Regexes"] <2> <3> out = r.identify("THM{7281j}}", only_text=True) <4> assert "TryHackMe Flag Format" in out["Regexes"]["text"][0]["Regex Pattern"]["Name"] <5>
===========unchanged ref 0=========== at: pywhat.identifier.Identifier identify(text: str, *, only_text=True, dist: Distribution=None, key: Optional[Callable]=None, reverse: Optional[bool]=None, boundaryless: Optional[Filter]=None, include_filenames=False) -> dict at: tests.test_identifier r = identifier.Identifier() ===========changed ref 0=========== # module: pywhat.helper class Keys(Enum): + def MATCHED(match): + return match["Matched"] + ===========changed ref 1=========== # module: pywhat.helper class Keys(Enum): + def NAME(match): + return match["Regex Pattern"]["Name"] + ===========changed ref 2=========== # module: pywhat.helper class Keys(Enum): + def RARITY(match): + return match["Regex Pattern"]["Rarity"] + ===========changed ref 3=========== # module: tests.test_filtration + def test_invalid_contains_returns_false(): + filter = Filter() + assert ({} in filter) is False + ===========changed ref 4=========== # module: pywhat def __dir__(): + return __all__ + ["__version__"] - return _contents + ["__version__"] ===========changed ref 5=========== # module: tests.test_filtration def test_distribution7(): with pytest.raises(InvalidTag): + Distribution({"Tags": "Media", "MinRarity": 0.7}) - dist = Distribution({"Tags": "Media", "MinRarity": 0.7}) ===========changed ref 6=========== # module: pywhat.helper class Keys(Enum): - NAME = lambda match: match["Regex Pattern"]["Name"] - RARITY = lambda match: match["Regex Pattern"]["Rarity"] - MATCHED = lambda match: match["Matched"] NONE = auto() ===========changed ref 7=========== # module: pywhat.filter class Filter(Mapping): def __contains__(self, item): try: return ( self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] and set(item["Tags"]) & self["Tags"] and not set(item["Tags"]) & self["ExcludeTags"] ) + except KeyError: - except: return False ===========changed ref 8=========== # module: pywhat __version__ = "4.3.1" tags = AvailableTags().get_tags() pywhat_tags = tags # left for backward compatibility purposes + __all__ = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - _contents = ["Identifier", "Distribution", "tags", "pywhat_tags", "Keys", "Filter"] - - __all__ = _contents del AvailableTags, filter ===========changed ref 9=========== <s> pywhat.", ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): """ pyWhat - Identify what something is. Made by Bee https://twitter.com/bee_sec_san https://github.com/bee-san Filtration: --rarity min:max Rarity is how unlikely something is to be a false-positive. The higher the number, the more unlikely. Only print entries with rarity in range [min,max]. min and max can be omitted. Note: PyWhat by default has a rarity of 0.1. To see all matches, with many potential false positives use `0:`. --include list Only include entries containing at least one tag in a list. List is a comma separated list. --exclude list Exclude specified tags. List is a comma separated list. Sorting: --key key_name Sort by the given key. --reverse Sort in reverse order. Available keys: name - Sort by the name of regex pattern rarity - Sort by rarity matched - Sort by a matched string none - No sorting is done (the default) Exporting: --json Return results in json format. 
Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because</s> ===========changed ref 10=========== <s> ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 1 <s> Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because the boundaryless mode is enabled for regexes with a rarity of 0.1 and higher. Since boundaryless mode may produce a lot of false-positive matches, it is possible to disable it, either fully or partially. '--disable-boundaryless' flag can be used to fully disable this mode. In addition, '-br', '-bi', and '-be' options can be used to tweak which regexes should be in boundaryless mode. Refer to the Filtration section for more information. Formatting the output: --format format_str format_str can be equal to: pretty - Output data in the table json - Ouput data in json format CUSTOM_STRING - Print data in the way you want. For every match CUSTOM_STRING will be printed and '%x' (See below for possible x values) will be substituted with a match value. For example: pywhat --format '%m - %n' 'google.com htb{flag}' will print: htb{flag} - HackTheBox Flag Format google.com - Uniform Resource Locator (URL) Possible '%x' values: %m - matched</s>
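The test_only_text edit follows the usual comparison-to-None rule (pycodestyle E711): identity checks are preferred over ==, which can be fooled by custom __eq__ implementations. For example:

out = {"Regexes": None}
assert out["Regexes"] is None    # preferred identity check
# assert None == out["Regexes"]  # same result here, but flagged as E711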
pywhat.filter/Filter.__contains__
Modified
bee-san~pyWhat
4342bdc6011afa30dd03a196c7804ead223d3984
refactor: remove unnecessary try except
<0>:<del> try: <1>:<add> return ( <del> return ( <2>:<add> self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] <del> self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] <3>:<add> and set(item["Tags"]) & self["Tags"] <del> and set(item["Tags"]) & self["Tags"] <4>:<add> and not set(item["Tags"]) & self["ExcludeTags"] <del> and not set(item["Tags"]) & self["ExcludeTags"] <5>:<add> ) <del> ) <6>:<del> except KeyError: <7>:<del> return False
# module: pywhat.filter class Filter(Mapping): def __contains__(self, item): <0> try: <1> return ( <2> self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"] <3> and set(item["Tags"]) & self["Tags"] <4> and not set(item["Tags"]) & self["ExcludeTags"] <5> ) <6> except KeyError: <7> return False <8>
===========unchanged ref 0=========== at: pywhat.filter.Filter.__init__ self._dict = {} at: typing.Mapping __contains__(self, o: object) -> bool
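Dropping the try/except presumes every key used in __contains__ is always present, both on the filter itself (which presumably sets MinRarity/MaxRarity/Tags/ExcludeTags defaults at construction) and on the items checked against it. A standalone sketch of the same Mapping-based membership test (a hypothetical TagFilter, not the real pywhat.filter.Filter):

from collections.abc import Mapping

class TagFilter(Mapping):
    def __init__(self, filters=None):
        # defaults guarantee the keys that __contains__ reads from self
        self._dict = {"MinRarity": 0.0, "MaxRarity": 1.0,
                      "Tags": set(), "ExcludeTags": set()}
        self._dict.update(filters or {})

    def __getitem__(self, key):
        return self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __contains__(self, item):
        return (
            self["MinRarity"] <= item["Rarity"] <= self["MaxRarity"]
            and bool(set(item["Tags"]) & self["Tags"])
            and not set(item["Tags"]) & self["ExcludeTags"]
        )

f = TagFilter({"Tags": {"Networking"}})
print({"Rarity": 0.5, "Tags": ["Networking"]} in f)   # True
print({"Rarity": 0.05, "Tags": ["Networking"]} in f)  # False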
pywhat.regex_identifier/RegexIdentifier.check
Modified
bee-san~pyWhat
a80cabdcee42a8dc09ef4a5098b61b063dd58473
Merge branch 'main' into perf-regex-identifier-check-shallow
<12>:<add> reg_match = dict(reg) <del> reg_match = copy.deepcopy(reg)
# module: pywhat.regex_identifier class RegexIdentifier: def check( self, text, dist: Optional[Distribution] = None, *, boundaryless: Optional[Filter] = None ): <0> if dist is None: <1> dist = self.distribution <2> if boundaryless is None: <3> boundaryless = Filter({"Tags": []}) <4> matches = [] <5> <6> for string in text: <7> for reg in dist.get_regexes(): <8> regex = ( <9> reg["Boundaryless Regex"] if reg in boundaryless else reg["Regex"] <10> ) <11> for matched_regex in re.finditer(regex, string, re.MULTILINE): <12> reg_match = copy.deepcopy(reg) <13> matched = self.clean_text(matched_regex.group(0)) <14> <15> if ( <16> reg_match.get("Exploit") is not None <17> and "curl" in reg_match["Exploit"] <18> ): <19> # Replace anything like XXXXX_XXXXXX_HERE with the match <20> reg_match["Exploit"] = re.sub( <21> r"[A-Z_]+_HERE", matched, reg_match["Exploit"] <22> ) <23> <24> children = reg_match.get("Children") <25> if children is not None: <26> processed_match = re.sub( <27> children.get("deletion_pattern", ""), "", matched <28> ) <29> matched_children = [] <30> if children["method"] == "hashmap": <31> for length in children["lengths"]: <32> try: <33> matched_children.append( <34> children["Items"][processed_match[:length]] <35> ) <36> except KeyError: <37> continue <38> else: <39> for element in children["Items"]: <40> if ( <41> children["method"] == "regex" <42> and re.search( <43> element, processed_match, re.MULTILINE <44> ) <45> )</s>
===========below chunk 0=========== # module: pywhat.regex_identifier class RegexIdentifier: def check( self, text, dist: Optional[Distribution] = None, *, boundaryless: Optional[Filter] = None ): # offset: 1 children["method"] == "startswith" and processed_match.startswith(element) ): matched_children.append(children["Items"][element]) if matched_children: reg_match["Description"] = children.get( "entry", "" ) + ", ".join(matched_children) reg_match.pop("Children", None) matches.append( { "Matched": matched, "Regex Pattern": reg_match, } ) return matches ===========unchanged ref 0=========== at: pywhat.filter Filter(filters_dict=None) Distribution(filter: Optional[Filter]=None) at: pywhat.filter.Distribution get_regexes() at: pywhat.regex_identifier.RegexIdentifier clean_text(text) at: pywhat.regex_identifier.RegexIdentifier.__init__ self.distribution = Distribution() at: re MULTILINE = RegexFlag.MULTILINE search(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] search(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr finditer(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Iterator[Match[AnyStr]] finditer(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Iterator[Match[AnyStr]] at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] at: typing.Match pos: int endpos: int lastindex: Optional[int] lastgroup: Optional[AnyStr] string: AnyStr re: Pattern[AnyStr] ===========unchanged ref 1=========== group(group1: Union[str, int], group2: Union[str, int], /, *groups: Union[str, int]) -> Tuple[AnyStr, ...] group(group: Union[str, int]=..., /) -> AnyStr
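The perf change replaces copy.deepcopy(reg) with dict(reg): a shallow copy is enough because check() only rebinds or pops top-level keys ("Exploit", "Description", "Children"), but nested values remain shared between the copy and the original. Illustrative sketch (the keys here are made up):

import copy

reg = {"Name": "URL", "Regex": r"https?://\S+", "Tags": ["Networking"]}

shallow = dict(reg)        # new outer dict; nested objects are shared
deep = copy.deepcopy(reg)  # everything duplicated

shallow["Name"] = "changed"            # safe: rebinds a key in the copy only
shallow["Tags"].append("Identifiers")  # mutates the list shared with reg!

print(reg["Name"])   # URL
print(reg["Tags"])   # ['Networking', 'Identifiers']
print(deep["Tags"])  # ['Networking']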
noxfile/tests
Modified
bee-san~pyWhat
67de66b1b5e4c0ad1d56cb1143f622c3e54e98f8
Merge branch 'main' into main
<8>:<add> "pytest-flake8",
# module: noxfile @nox.session def tests(session: Session) -> None: <0> """Run the test suite.""" <1> session.run("poetry", "install", "--no-dev", external=True) <2> install_with_constraints( <3> session, <4> "pytest", <5> "pytest-black", <6> "pytest-cov", <7> "pytest-isort", <8> "pytest-mypy", <9> "types-requests", <10> "types-orjson", <11> ) <12> session.run("pytest", "--cov=./", "--cov-report=xml") <13>
===========unchanged ref 0=========== at: noxfile install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None
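install_with_constraints is referenced above but its body is not shown; a common shape for such a helper (a sketch under that assumption, not necessarily this repository's implementation) exports Poetry's lock file as a pip constraints file so each nox session installs the same pinned versions:

import tempfile
from typing import Any

from nox.sessions import Session

def install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None:
    # Export the Poetry lock as a requirements/constraints file for pip.
    with tempfile.NamedTemporaryFile() as requirements:
        session.run(
            "poetry", "export", "--dev", "--format=requirements.txt",
            f"--output={requirements.name}", "--without-hashes",
            external=True,
        )
        session.install(f"--constraint={requirements.name}", *args, **kwargs)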
pywhat.what/main
Modified
bee-san~pyWhat
9ae9fcb770af3eb5769ca28510e92be52465b8e2
Merge branch 'main' into fix-ec2
<s> is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) + @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") - @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): <0> """ <1> pyWhat - Identify what something is. <2> <3> Made by Bee https://twitter.com/bee_sec_san <4> <5> https://github.com/bee-san <6> <7> Filtration: <8> <9> --rarity min:max <10> <11> Rarity is how unlikely something is to be a false-positive. The higher the number, the more unlikely. <12> <13> Only print entries with rarity in range [min,max]. min and max can be omitted. <14> <15> Note: PyWhat by default has a rarity of 0.1. To see all matches, with many potential false positives use `0:`. <16> <17> --include list <18> <19> Only include entries containing at least one tag in a list. List is a comma separated list. <20> <21> --exclude list <22> <23> Exclude specified tags. List is a comma separated list. <24> <25> Sorting: <26> <27> --key key_name <28> <29> Sort by the given key. <30> <31> --reverse <32> <33> Sort in reverse order. <34> <35> Available keys: <36> <37> name - Sort by the name of regex pattern <38> <39> rarity - Sort by rarity <40> <41> matched - Sort by a matched string <42> <43> none - No sorting is done (the default) <44> <45> Exporting: <46> </s>
===========below chunk 0=========== <s>True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) + @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") - @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 1 Return results in json format. Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because the boundaryless mode is enabled for regexes with a rarity of 0.1 and higher. Since boundaryless mode may produce a lot of false-positive matches, it is possible to disable it, either fully or partially. '--disable-boundaryless' flag can be used to fully disable this mode. In addition, '-br', '-bi', and '-be' options can be used to tweak which regexes should be in boundaryless mode. Refer to the Filtration section for more information. Formatting the output: --format format_str format_str can be equal to: pretty - Output data in the table json - Ouput data in json format CUSTOM_STRING - Print data in the way you want. For every match CUSTOM_STRING will be printed and '%x' (See below for possible x values) will be substituted with a match value. For example: pywhat --format '%m - %n' 'google.com htb{flag}' will print: htb{flag} - HackTheBox Flag Format google.com - Uniform Resource Locator (URL) Possible '%x' values: %m - matched text %n - name of regex %d - description</s> ===========below chunk 1=========== <s>True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) + @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") - @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 2 <s> %m - matched text %n - name of regex %d - description (will not output if absent) %e - exploit (will not ouput if absent) %r - rarity %l - link (will not ouput if absent) %t - tags (in 'tag1, tag2 ...' format) If you want to print '%' or '\\' character - escape it: '\\%', '\\\\'. Examples: * what 'HTB{this is a flag}' * what '0x52908400098527886E0F7030069857D2E4169EE7' * what -- '52.6169586, -1.9779857' * what --rarity 0.6: '[email protected]' * what --rarity 0: --include "credentials, username, password" --exclude "aws, credentials" 'James:SecretPassword' * what -br 0.6: -be URL '[email protected]' Your text must either be in quotation marks, or use the POSIX standard of "--" to mean "anything after -- is textual input". pyWhat can also search files or even a whole directory with</s> ===========below chunk 2=========== <s>True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) + @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") - @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 3 <s> * what 'secret.txt' * what 'this/is/a/path' """ if kwargs["text_input"] is None: sys.exit("Text input expected. 
Run 'pywhat --help' for help") dist = Distribution( create_filter(kwargs["rarity"], kwargs["include"], kwargs["exclude"]) ) if kwargs["disable_boundaryless"]: boundaryless = Filter({"Tags": []}) # use empty filter else: boundaryless = create_filter( kwargs["boundaryless_rarity"], kwargs["boundaryless_include"], kwargs["boundaryless_exclude"], ) what_obj = What_Object(dist) if kwargs["key"] is None: key = Keys.NONE else: try: key = str_to_key(kwargs["key"]) except ValueError: print("Invalid key") sys.exit(1) identified_output = what_obj.what_is_this( kwargs["text_input"], kwargs["only_text"], key, kwargs["reverse"], boundaryless, kwargs["include_filenames"], ) p = printer.Printing() </s> ===========below chunk 3=========== <s>True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) + @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") - @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to ouput") def main(**kwargs): # offset: 4 <s> if kwargs["json"] or str(kwargs["format"]).strip() == "json": p.print_json(identified_output) elif str(kwargs["format"]).strip() == "pretty": p.pretty_print(identified_output, kwargs["text_input"], kwargs["print_tags"]) elif kwargs["format"] is not None: p.format_print(identified_output, kwargs["format"]) else: p.print_raw(identified_output, kwargs["text_input"], kwargs["print_tags"])
pywhat.printer/Printing.print_json
Modified
bee-san~pyWhat
c89c57ca973bc4b94bdf1290067fa98db49fe61a
Fix json printing modified regex
<0>:<add> print(json.dumps(text)) <del> self.console.print(json.dumps(text))
# module: pywhat.printer class Printing: def print_json(self, text: dict): <0> self.console.print(json.dumps(text)) <1>
===========unchanged ref 0=========== at: json dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str
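The fix sends the JSON through the built-in print instead of rich's Console.print. rich applies console markup and syntax highlighting by default, which can alter strings containing square brackets or backslashes (regex patterns, for instance), whereas print emits the exact json.dumps output. A minimal illustration:

import json

payload = {"Matched": "[2020-01-01]", "Regex Pattern": {"Name": "Date-like"}}

print(json.dumps(payload))  # exact, machine-parseable JSON on stdout

# If rich output were still wanted, its interpretation can be switched off:
# from rich.console import Console
# Console().print(json.dumps(payload), markup=False, highlight=False)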
pywhat.what/main
Modified
bee-san~pyWhat
e2e081b0cebf9785eb0e38efb1b1245de151d51f
Fix example invocation with tags in help string
<s> version of pywhat.", ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") def main(**kwargs): <0> """ <1> pyWhat - Identify what something is. <2> <3> Made by Bee https://twitter.com/bee_sec_san <4> <5> https://github.com/bee-san <6> <7> Filtration: <8> <9> --rarity min:max <10> <11> Rarity is how unlikely something is to be a false-positive. The higher the number, the more unlikely. <12> <13> Only print entries with rarity in range [min,max]. min and max can be omitted. <14> <15> Note: PyWhat by default has a rarity of 0.1. To see all matches, with many potential false positives use `0:`. <16> <17> --include list <18> <19> Only include entries containing at least one tag in a list. List is a comma separated list. <20> <21> --exclude list <22> <23> Exclude specified tags. List is a comma separated list. <24> <25> Sorting: <26> <27> --key key_name <28> <29> Sort by the given key. <30> <31> --reverse <32> <33> Sort in reverse order. <34> <35> Available keys: <36> <37> name - Sort by the name of regex pattern <38> <39> rarity - Sort by rarity <40> <41> matched - Sort by a matched string <42> <43> none - No sorting is done (the default) <44> <45> Exporting: <46> </s>
===========below chunk 0=========== <s>, ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") def main(**kwargs): # offset: 1 Return results in json format. Boundaryless mode: CLI tool matches strings like 'abcdTHM{hello}plze' by default because the boundaryless mode is enabled for regexes with a rarity of 0.1 and higher. Since boundaryless mode may produce a lot of false-positive matches, it is possible to disable it, either fully or partially. '--disable-boundaryless' flag can be used to fully disable this mode. In addition, '-br', '-bi', and '-be' options can be used to tweak which regexes should be in boundaryless mode. Refer to the Filtration section for more information. Formatting the output: --format format_str format_str can be equal to: pretty - Output data in the table json - Output data in json format CUSTOM_STRING - Print data in the way you want. For every match CUSTOM_STRING will be printed and '%x' (See below for possible x values) will be substituted with a match value. For example: pywhat --format '%m - %n' 'google.com htb{flag}' will print: htb{flag} - HackTheBox Flag Format google.com - Uniform Resource Locator (URL) Possible '%x' values: %m - matched text %n - name of regex %d - description (will</s> ===========below chunk 1=========== <s>, ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") def main(**kwargs): # offset: 2 <s> %m - matched text %n - name of regex %d - description (will not output if absent) %e - exploit (will not output if absent) %r - rarity %l - link (will not output if absent) %t - tags (in 'tag1, tag2 ...' format) If you want to print '%' or '\\' character - escape it: '\\%', '\\\\'. Examples: * what 'HTB{this is a flag}' * what '0x52908400098527886E0F7030069857D2E4169EE7' * what -- '52.6169586, -1.9779857' * what --rarity 0.6: '[email protected]' * what --rarity 0: --include "credentials, username, password" --exclude "aws, credentials" 'James:SecretPassword' * what -br 0.6: -be URL '[email protected]' Your text must either be in quotation marks, or use the POSIX standard of "--" to mean "anything after -- is textual input". pyWhat can also search files or even a whole directory with recursion: </s> ===========below chunk 2=========== <s>, ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") def main(**kwargs): # offset: 3 <s> what 'secret.txt' * what 'this/is/a/path' """ if kwargs["text_input"] is None: sys.exit("Text input expected. 
Run 'pywhat --help' for help") dist = Distribution( create_filter(kwargs["rarity"], kwargs["include"], kwargs["exclude"]) ) if kwargs["disable_boundaryless"]: boundaryless = Filter({"Tags": []}) # use empty filter else: boundaryless = create_filter( kwargs["boundaryless_rarity"], kwargs["boundaryless_include"], kwargs["boundaryless_exclude"], ) what_obj = What_Object(dist) if kwargs["key"] is None: key = Keys.NONE else: try: key = str_to_key(kwargs["key"]) except ValueError: print("Invalid key") sys.exit(1) identified_output = what_obj.what_is_this( kwargs["text_input"], kwargs["only_text"], key, kwargs["reverse"], boundaryless, kwargs["include_filenames"], ) p = printer.Printing() if kwargs["</s> ===========below chunk 3=========== <s>, ) @click.option( "-if", "--include-filenames", is_flag=True, help="Search filenames for possible matches.", ) @click.option( "--format", required=False, help="Format output according to specified rules.", ) @click.option("-pt", "--print-tags", is_flag=True, help="Add flags to output") def main(**kwargs): # offset: 4 <s> or str(kwargs["format"]).strip() == "json": p.print_json(identified_output) elif str(kwargs["format"]).strip() == "pretty": p.pretty_print(identified_output, kwargs["text_input"], kwargs["print_tags"]) elif kwargs["format"] is not None: p.format_print(identified_output, kwargs["format"]) else: p.print_raw(identified_output, kwargs["text_input"], kwargs["print_tags"])
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
e201432ac6878411518b5c16c21bddcf39bd42ae
Remove unsupported characters from Azure Search section id (#39)
<2>:<add> "id": re.sub("[^0-9a-zA-Z_-]","_",f"{filename}-{i}"), <del> "id": f"{filename}-{i}".replace(".", "_").replace(" ", "_"),
# module: scripts.prepdocs def create_sections(filename, pages): <0> for i, (section, pagenum) in enumerate(split_text(pages)): <1> yield { <2> "id": f"{filename}-{i}".replace(".", "_").replace(" ", "_"), <3> "content": section, <4> "category": args.category, <5> "sourcepage": blob_name_from_file_page(filename, pagenum), <6> "sourcefile": filename <7> } <8>
===========unchanged ref 0=========== at: re sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr at: scripts.prepdocs args = parser.parse_args() blob_name_from_file_page(filename, page) split_text(pages)
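Azure Cognitive Search document keys may only contain letters, digits, underscores, dashes, and equal signs, so the id built from the filename has to be sanitized; the re.sub above collapses every other character to an underscore. A small wrapper (make_section_id is a hypothetical name) showing the effect:

import re

def make_section_id(filename, i):
    # keep only characters accepted in an Azure Cognitive Search document key
    return re.sub("[^0-9a-zA-Z_-]", "_", f"{filename}-{i}")

print(make_section_id("Benefit Options.pdf", 3))  # Benefit_Options_pdf-3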
scripts.prepdocs/index_sections
Modified
Azure-Samples~azure-search-openai-demo
1273a2111e92263d53a9ffa8d6abfd0531961c9b
Fix typo which causes error for document with more than 1000 sections. (#38)
<10>:<add> results = search_client.upload_documents(documents=batch) <del> results = search_client.index_documents(batch=batch)
# module: scripts.prepdocs def index_sections(filename, sections): <0> if args.verbose: print(f"Indexing sections from '{filename}' into search index '{args.index}'") <1> search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/", <2> index_name=args.index, <3> credential=search_creds) <4> i = 0 <5> batch = [] <6> for s in sections: <7> batch.append(s) <8> i += 1 <9> if i % 1000 == 0: <10> results = search_client.index_documents(batch=batch) <11> succeeded = sum([1 for r in results if r.succeeded]) <12> if args.verbose: print(f"\tIndexed {len(results)} sections, {succeeded} succeeded") <13> batch = [] <14> <15> if len(batch) > 0: <16> results = search_client.upload_documents(documents=batch) <17> succeeded = sum([1 for r in results if r.succeeded]) <18> if args.verbose: print(f"\tIndexed {len(results)} sections, {succeeded} succeeded") <19>
===========unchanged ref 0=========== at: scripts.prepdocs args = parser.parse_args() search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey)
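The corrected call pushes each 1000-document batch with SearchClient.upload_documents; the surrounding loop is an ordinary chunking pattern. A standalone sketch of that pattern, independent of the Azure SDK:

def batches(items, size=1000):
    # yield lists of at most `size` items, preserving order
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch

# Roughly how it would plug into the indexing code above:
# for batch in batches(sections):
#     results = search_client.upload_documents(documents=batch)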
scripts.prepdocs/blob_name_from_file_page
Modified
Azure-Samples~azure-search-openai-demo
6ac7c909c02d760bafd5e5e838fa8c2a46dd4aaf
Use Azure Form Recognizer as document preprocessing to extract text, tables, and document layout (#37)
<0>:<add> if os.path.splitext(filename)[1].lower() == ".pdf": <add> return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" <del> return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" <1>:<add> else: <add> return os.path.basename(filename)
# module: scripts.prepdocs + def blob_name_from_file_page(filename, page = 0): - def blob_name_from_file_page(filename, page): <0> return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" <1>
===========unchanged ref 0=========== at: scripts.prepdocs args = parser.parse_args() azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid) ===========changed ref 0=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v" ) parser.add_argument("files", help="Files to be processed") parser.add_argument("--category", help="Value for the category field in the search index for all sections indexed in this run") parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage") parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)") parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)") parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)") parser.add_argument("--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index") parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (</s> ===========changed ref 1=========== # module: scripts.prepdocs # offset: 1 <s> index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents") + parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)") + parser.add_argument("--formrecognizerkey", required=False, help="Optional. 
Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") args = parser.parse_args() # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid) default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey) if not args.skipblobs: storage_creds = default_creds if args.storagekey == None else args.storagekey + if not args.localpdfparser: + # check if Azure Form Recognizer credentials are provided + if args.formrecognizerservice == None: + print("Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.") + exit(1) + formrecognizer_creds = default_creds if args.formrecognizerkey == None else AzureKeyCredential(args.formrecognizerkey)
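With the new branch, only PDFs get a per-page suffix; any other file keeps its plain basename. Concretely (the paths are made-up examples):

import os.path

def blob_name_from_file_page(filename, page=0):
    if os.path.splitext(filename)[1].lower() == ".pdf":
        return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf"
    return os.path.basename(filename)

print(blob_name_from_file_page("data/Benefit Options.pdf", 2))  # Benefit Options-2.pdf
print(blob_name_from_file_page("data/employee_handbook.txt"))   # employee_handbook.txt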
scripts.prepdocs/upload_blobs
Modified
Azure-Samples~azure-search-openai-demo
6ac7c909c02d760bafd5e5e838fa8c2a46dd4aaf
Use Azure Form Recognizer as document preprocessing to extract text, tables, and document layout (#37)
<4>:<del> for i in range(len(pages)): <5>:<del> blob_name = blob_name_from_file_page(filename, i) <6>:<del> if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}") <7>:<del> f = io.BytesIO() <8>:<del> writer = PdfWriter() <9>:<del> writer.add_page(pages[i]) <10>:<del> writer.write(f) <11>:<del> f.seek(0) <12>:<del> blob_container.upload_blob(blob_name, f, overwrite=True)
# module: scripts.prepdocs + def upload_blobs(filename): - def upload_blobs(pages): <0> blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) <1> blob_container = blob_service.get_container_client(args.container) <2> if not blob_container.exists(): <3> blob_container.create_container() <4> for i in range(len(pages)): <5> blob_name = blob_name_from_file_page(filename, i) <6> if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}") <7> f = io.BytesIO() <8> writer = PdfWriter() <9> writer.add_page(pages[i]) <10> writer.write(f) <11> f.seek(0) <12> blob_container.upload_blob(blob_name, f, overwrite=True) <13>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocs args = parser.parse_args() default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None ===========changed ref 0=========== # module: scripts.prepdocs + def blob_name_from_file_page(filename, page = 0): - def blob_name_from_file_page(filename, page): + if os.path.splitext(filename)[1].lower() == ".pdf": + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" - return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" + else: + return os.path.basename(filename) ===========changed ref 1=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v" ) parser.add_argument("files", help="Files to be processed") parser.add_argument("--category", help="Value for the category field in the search index for all sections indexed in this run") parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage") parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)") parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)") parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)") parser.add_argument("--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index") parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (</s> ===========changed ref 2=========== # module: scripts.prepdocs # offset: 1 <s> index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents") + parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)") + parser.add_argument("--formrecognizerkey", required=False, help="Optional. 
Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") args = parser.parse_args() # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid) default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey) if not args.skipblobs: storage_creds = default_creds if args.storagekey == None else args.storagekey + if not args.localpdfparser: + # check if Azure Form Recognizer credentials are provided + if args.formrecognizerservice == None: + print("Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.") + exit(1) + formrecognizer_creds = default_creds if args.formrecognizerkey == None else AzureKeyCredential(args.formrecognizerkey)
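The deleted loop split the PDF into one blob per page; per the unchanged refs, the restructured upload_blobs still builds single-page PDFs in memory. A compact standalone sketch of that per-page split, assuming the pypdf package this script already uses (blob naming simplified here):

import io
from pypdf import PdfReader, PdfWriter

def page_blobs(filename):
    # yield (blob_name, file_like) pairs, one single-page PDF per source page
    reader = PdfReader(filename)
    for i, page in enumerate(reader.pages):
        buf = io.BytesIO()
        writer = PdfWriter()
        writer.add_page(page)
        writer.write(buf)
        buf.seek(0)
        yield f"{filename}-{i}.pdf", buf

# for name, data in page_blobs("report.pdf"):
#     blob_container.upload_blob(name, data, overwrite=True)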
scripts.prepdocs/split_text
Modified
Azure-Samples~azure-search-openai-demo
6ac7c909c02d760bafd5e5e838fa8c2a46dd4aaf
Use Azure Form Recognizer as document preprocessing to extract text, tables, and document layout (#37)
<3>:<del> <4>:<del> page_map = [] <5>:<del> offset = 0 <6>:<del> for i, p in enumerate(pages): <7>:<del> text = p.extract_text() <8>:<del> page_map.append((i, offset, text)) <9>:<del> offset += len(text)
# module: scripts.prepdocs + def split_text(page_map): - def split_text(pages): <0> SENTENCE_ENDINGS = [".", "!", "?"] <1> WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] <2> if args.verbose: print(f"Splitting '{filename}' into sections") <3> <4> page_map = [] <5> offset = 0 <6> for i, p in enumerate(pages): <7> text = p.extract_text() <8> page_map.append((i, offset, text)) <9> offset += len(text) <10> <11> def find_page(offset): <12> l = len(page_map) <13> for i in range(l - 1): <14> if offset >= page_map[i][1] and offset < page_map[i + 1][1]: <15> return i <16> return l - 1 <17> <18> all_text = "".join(p[2] for p in page_map) <19> length = len(all_text) <20> start = 0 <21> end = length <22> while start + SECTION_OVERLAP < length: <23> last_word = -1 <24> end = start + MAX_SECTION_LENGTH <25> <26> if end > length: <27> end = length <28> else: <29> # Try to find the end of the sentence <30> while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS: <31> if all_text[end] in WORDS_BREAKS: <32> last_word = end <33> end += 1 <34> if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0: <35> end = last_word # Fall back to at least keeping a whole word <36> if end < length: <37> end += 1 <38> <39> # Try to find the start of the sentence or at least a whole word boundary <40> </s>
===========below chunk 0=========== # module: scripts.prepdocs + def split_text(page_map): - def split_text(pages): # offset: 1 while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all_text[start] not in SENTENCE_ENDINGS: if all_text[start] in WORDS_BREAKS: last_word = start start -= 1 if all_text[start] not in SENTENCE_ENDINGS and last_word > 0: start = last_word if start > 0: start += 1 yield (all_text[start:end], find_page(start)) start = end - SECTION_OVERLAP if start + SECTION_OVERLAP < end: yield (all_text[start:end], find_page(start)) ===========unchanged ref 0=========== at: html escape(s: AnyStr, quote: bool=...) -> AnyStr at: io.BytesIO seek(self, offset: int, whence: int=..., /) -> int at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: re match(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] match(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] at: scripts.prepdocs args = parser.parse_args() storage_creds = default_creds if args.storagekey == None else args.storagekey formrecognizer_creds = default_creds if args.formrecognizerkey == None else AzureKeyCredential(args.formrecognizerkey) blob_name_from_file_page(filename, page=0) at: scripts.prepdocs.upload_blobs blob_container = blob_service.get_container_client(args.container) pages = reader.pages f = io.BytesIO() writer = PdfWriter() ===========changed ref 0=========== # module: scripts.prepdocs + def blob_name_from_file_page(filename, page = 0): - def blob_name_from_file_page(filename, page): + if os.path.splitext(filename)[1].lower() == ".pdf": + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" - return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" + else: + return os.path.basename(filename) ===========changed ref 1=========== # module: scripts.prepdocs + def upload_blobs(filename): - def upload_blobs(pages): blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if not blob_container.exists(): blob_container.create_container() - for i in range(len(pages)): - blob_name = blob_name_from_file_page(filename, i) - if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}") - f = io.BytesIO() - writer = PdfWriter() - writer.add_page(pages[i]) - writer.write(f) - f.seek(0) - blob_container.upload_blob(blob_name, f, overwrite=True) ===========changed ref 2=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v" ) parser.add_argument("files", help="Files to be processed") parser.add_argument("--category", help="Value for the category field in the search index for all sections indexed in this run") parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage") parser.add_argument("--storageaccount", help="Azure Blob Storage account name") 
parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)") parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)") parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)") parser.add_argument("--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index") parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (</s> ===========changed ref 3=========== # module: scripts.prepdocs # offset: 1 <s> index") + parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents") + parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)") + parser.add_argument("--formrecognizerkey", required=False, help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") args = parser.parse_args() # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid) default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey) if not args.skipblobs: storage_creds = default_creds if args.storagekey == None else args.storagekey + if not args.localpdfparser: + # check if Azure Form Recognizer credentials are provided + if args.formrecognizerservice == None: + print("Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.") + exit(1) + formrecognizer_creds = default_creds if args.formrecognizerkey == None else AzureKeyCredential(args.formrecognizerkey)
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
6ac7c909c02d760bafd5e5e838fa8c2a46dd4aaf
Use Azure Form Recognizer as document preprocessing to extract text, tables, and document layout (#37)
<0>:<add> for i, (section, pagenum) in enumerate(split_text(page_map)): <del> for i, (section, pagenum) in enumerate(split_text(pages)):
# module: scripts.prepdocs + def create_sections(filename, page_map): - def create_sections(filename, pages): <0> for i, (section, pagenum) in enumerate(split_text(pages)): <1> yield { <2> "id": re.sub("[^0-9a-zA-Z_-]","_",f"{filename}-{i}"), <3> "content": section, <4> "category": args.category, <5> "sourcepage": blob_name_from_file_page(filename, pagenum), <6> "sourcefile": filename <7> } <8>
===========unchanged ref 0=========== at: scripts.prepdocs.get_document_text tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] ===========changed ref 0=========== # module: scripts.prepdocs + def blob_name_from_file_page(filename, page = 0): - def blob_name_from_file_page(filename, page): + if os.path.splitext(filename)[1].lower() == ".pdf": + return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" - return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf" + else: + return os.path.basename(filename) ===========changed ref 1=========== # module: scripts.prepdocs + def table_to_html(table): + table_html = "<table>" + rows = [sorted([cell for cell in table.cells if cell.row_index == i], key=lambda cell: cell.column_index) for i in range(table.row_count)] + for row_cells in rows: + table_html += "<tr>" + for cell in row_cells: + tag = "th" if (cell.kind == "columnHeader" or cell.kind == "rowHeader") else "td" + cell_spans = "" + if cell.column_span > 1: cell_spans += f" colSpan={cell.column_span}" + if cell.row_span > 1: cell_spans += f" rowSpan={cell.row_span}" + table_html += f"<{tag}{cell_spans}>{html.escape(cell.content)}</{tag}>" + table_html +="</tr>" + table_html += "</table>" + return table_html + ===========changed ref 2=========== # module: scripts.prepdocs + def upload_blobs(filename): - def upload_blobs(pages): blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if not blob_container.exists(): blob_container.create_container() - for i in range(len(pages)): - blob_name = blob_name_from_file_page(filename, i) - if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}") - f = io.BytesIO() - writer = PdfWriter() - writer.add_page(pages[i]) - writer.write(f) - f.seek(0) - blob_container.upload_blob(blob_name, f, overwrite=True) ===========changed ref 3=========== # module: scripts.prepdocs + def get_document_text(filename): + offset = 0 + page_map = [] + if args.localpdfparser: + reader = PdfReader(filename) + pages = reader.pages + for page_num, p in enumerate(pages): + page_text = p.extract_text() + page_map.append((page_num, offset, page_text)) + offset += len(page_text) + else: + if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") + form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) + with open(filename, "rb") as f: + poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) + form_recognizer_results = poller.result() + + for page_num, page in enumerate(form_recognizer_results.pages): + tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] + + # mark all positions of the table spans in the page + page_offset = page.spans[0].offset + page_length = page.spans[0].length + table_chars = [-1]*page_length + for table_id, table in enumerate(tables_on_page): + for span in table.spans: + # replace all table spans with "table_id" in table_chars array + for i in range(span.length): + idx = span.offset - page_offset + i + if idx >=0 and idx < page_length: + table_chars[idx] = table_id + + </s> ===========changed ref 
4=========== # module: scripts.prepdocs + def get_document_text(filename): # offset: 1 <s> <add> if idx >=0 and idx < page_length: + table_chars[idx] = table_id + + # build page text by replacing charcters in table spans with table html + page_text = "" + added_tables = set() + for idx, table_id in enumerate(table_chars): + if table_id == -1: + page_text += form_recognizer_results.content[page_offset + idx] + elif not table_id in added_tables: + page_text += table_to_html(tables_on_page[table_id]) + added_tables.add(table_id) + + page_text += " " + page_map.append((page_num, offset, page_text)) + offset += len(page_text) + + return page_map + ===========changed ref 5=========== # module: scripts.prepdocs + def split_text(page_map): - def split_text(pages): SENTENCE_ENDINGS = [".", "!", "?"] WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] if args.verbose: print(f"Splitting '{filename}' into sections") - - page_map = [] - offset = 0 - for i, p in enumerate(pages): - text = p.extract_text() - page_map.append((i, offset, text)) - offset += len(text) def find_page(offset): l = len(page_map) for i in range(l - 1): if offset >= page_map[i][1] and offset < page_map[i + 1][1]: return i return l - 1 all_text = "".join(p[2] for p in page_map) length = len(all_text) start = 0 end = length while start + SECTION_OVERLAP < length: last_word = -1 end = start + MAX_SECTION_LENGTH if end > length: end = length else: # Try to find the end of the sentence while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS: if all_text[end] in WORDS_BREAKS: last_word = end end += 1 if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all</s>
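Since each page_map entry above is a (page_num, offset, page_text) tuple, the find_page helper used by split_text is a simple offset-range scan; here is a self-contained sketch with invented page sizes showing how a character offset maps back to a page number.
# Hypothetical page_map in the same (page_num, offset, page_text) shape as built by get_document_text.
page_map = [(0, 0, "a" * 500), (1, 500, "b" * 800), (2, 1300, "c" * 200)]

def find_page(offset):
    # Return the index of the page whose offset range contains `offset`.
    num_pages = len(page_map)
    for i in range(num_pages - 1):
        if page_map[i][1] <= offset < page_map[i + 1][1]:
            return i
    return num_pages - 1

assert find_page(0) == 0      # falls in page 0 (offsets 0-499)
assert find_page(750) == 1    # falls in page 1 (offsets 500-1299)
assert find_page(1400) == 2   # past the last boundary, so the last page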
app.backend.langchainadapters/HtmlCallbackHandler.on_tool_start
Modified
Azure-Samples~azure-search-openai-demo
74d0776b127e955a739b53a8e459c4bd4085a581
Merge pull request #176 from mattmsft/mattmsft-patch-1
<1>:<add> pass <del> self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>"
# module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_tool_start( self, serialized: Dict[str, Any], + input_str: str, - action: AgentAction, color: Optional[str] = None, **kwargs: Any, ) -> None: <0> """Print out the log in specified color.""" <1> self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" <2>
===========unchanged ref 0=========== at: app.backend.langchainadapters.HtmlCallbackHandler html: str = "" at: typing Dict = _alias(dict, 2, inst=False, name='Dict')
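Setting the LangChain callback interfaces aside, what HtmlCallbackHandler accumulates is escaped log text wrapped in a coloured span; a minimal stand-alone sketch of that pattern follows (the log line is invented and html.escape stands in for the module's ch() helper).
import html

log_html = ""

def record_step(text, color="blue"):
    # Escape the raw agent output so it renders safely in the thought-process panel.
    global log_html
    log_html += f"<span style='color:{color}'>{html.escape(text)}</span><br>"

record_step("Action: Search[employee handbook]")
print(log_html)
# <span style='color:blue'>Action: Search[employee handbook]</span><br>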
app.backend.lookuptool/CsvLookupTool.__init__
Modified
Azure-Samples~azure-search-openai-demo
74d0776b127e955a739b53a8e459c4bd4085a581
Merge pull request #176 from mattmsft/mattmsft-patch-1
<0>:<add> super().__init__(name, self.lookup, description, callbacks=callbacks) <del> super().__init__(name, self.lookup, description) <1>:<del> self.data = {}
<s> class CsvLookupTool(Tool): + def __init__(self, filename: path, key_field: str, name: str = "lookup", + description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question", - def __init__(self, filename: path, key_field: str, name: str = "lookup", description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question"): + callbacks: Callbacks = None): <0> super().__init__(name, self.lookup, description) <1> self.data = {} <2> with open(filename, newline='') as csvfile: <3> reader = csv.DictReader(csvfile) <4> for row in reader: <5> self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row]) <6>
===========unchanged ref 0=========== at: app.backend.lookuptool.CsvLookupTool lookup(key: str) -> Optional[str] ===========changed ref 0=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): + def on_agent_action( + self, + action: AgentAction, + color: Optional[str] = None, + **kwargs: Any) -> Any: + self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" + ===========changed ref 1=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_tool_start( self, serialized: Dict[str, Any], + input_str: str, - action: AgentAction, color: Optional[str] = None, **kwargs: Any, ) -> None: """Print out the log in specified color.""" + pass - self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>"
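Ignoring the Tool and callbacks plumbing, the lookup table that CsvLookupTool.__init__ builds is a plain dict of newline-joined field:value strings keyed by key_field; here is a self-contained sketch with a made-up CSV standing in for data/employeeinfo.csv.
import csv
import io

# Invented CSV content; the real file is data/employeeinfo.csv.
csv_text = "name,plan,pto\nEmployee1,Standard,15 days\n"

data = {}
for row in csv.DictReader(io.StringIO(csv_text)):
    data[row["name"]] = "\n".join(f"{field}:{row[field]}" for field in row)

print(data["Employee1"])
# name:Employee1
# plan:Standard
# pto:15 days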
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
74d0776b127e955a739b53a8e459c4bd4085a581
Merge pull request #176 from mattmsft/mattmsft-patch-1
<7>:<add> acs_tool = Tool(name="CognitiveSearch", <add> func=lambda q: self.retrieve(q, overrides), <add> description=self.CognitiveSearchToolDescription, <add> callbacks=cb_manager) <del> acs_tool = Tool(name = "CognitiveSearch", func = lambda q: self.retrieve(q, overrides), description = self.CognitiveSearchToolDescription) <8>:<add> employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) <del> employee_tool = EmployeeInfoTool("Employee1")
<s> parts: first use GPT to see if we need more information, # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): def run(self, q: str, overrides: dict) -> any: <0> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1> self.results = None <2> <3> # Use to capture thought process during iterations <4> cb_handler = HtmlCallbackHandler() <5> cb_manager = CallbackManager(handlers=[cb_handler]) <6> <7> acs_tool = Tool(name = "CognitiveSearch", func = lambda q: self.retrieve(q, overrides), description = self.CognitiveSearchToolDescription) <8> employee_tool = EmployeeInfoTool("Employee1") <9> tools = [acs_tool, employee_tool] <10> <11> prompt = ZeroShotAgent.create_prompt( <12> tools=tools, <13> prefix=overrides.get("prompt_template_prefix") or self.template_prefix, <14> suffix=overrides.get("prompt_template_suffix") or self.template_suffix, <15> input_variables = ["input", "agent_scratchpad"]) <16> llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) <17> chain = LLMChain(llm = llm, prompt = prompt) <18> agent_exec = AgentExecutor.from_agent_and_tools( <19> agent = ZeroShotAgent(llm_chain = chain, tools = tools), <20> tools = tools, <21> verbose = True, <22> callback_manager = cb_manager) <23> result =</s>
===========below chunk 0=========== <s>PT to see if we need more information, # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): def run(self, q: str, overrides: dict) -> any: # offset: 1 # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread EmployeeInfoTool(employee_name: str, callbacks: Callbacks=None) at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." 
retrieve(q: str, overrides: dict) -> any at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.openai_deployment = openai_deployment ===========unchanged ref 1=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.retrieve self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r] self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for doc in r] at: approaches.approach.Approach run(self, q: str, use_summaries: bool) -> any at: openai api_key = os.environ.get("OPENAI_API_KEY") at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): + def on_agent_action( + self, + action: AgentAction, + color: Optional[str] = None, + **kwargs: Any) -> Any: + self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" + ===========changed ref 1=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_tool_start( self, serialized: Dict[str, Any], + input_str: str, - action: AgentAction, color: Optional[str] = None, **kwargs: Any, ) -> None: """Print out the log in specified color.""" + pass - self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" ===========changed ref 2=========== <s> class CsvLookupTool(Tool): + def __init__(self, filename: path, key_field: str, name: str = "lookup", + description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question", - def __init__(self, filename: path, key_field: str, name: str = "lookup", description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question"): + callbacks: Callbacks = None): + super().__init__(name, self.lookup, description, callbacks=callbacks) - super().__init__(name, self.lookup, description) - self.data = {} with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row])
app.backend.approaches.readretrieveread/EmployeeInfoTool.__init__
Modified
Azure-Samples~azure-search-openai-demo
74d0776b127e955a739b53a8e459c4bd4085a581
Merge pull request #176 from mattmsft/mattmsft-patch-1
<0>:<add> super().__init__(filename="data/employeeinfo.csv", <add> key_field="name", <add> name="Employee", <add> description="useful for answering questions about the employee, their benefits and other personal information", <add> callbacks=callbacks) <del> super().__init__(filename = "data/employeeinfo.csv", key_field = "name", name = "Employee", description = "useful for answering questions about the employee, their benefits and other personal information")
# module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def __init__(self, employee_name: str, callbacks: Callbacks = None): - def __init__(self, employee_name: str): <0> super().__init__(filename = "data/employeeinfo.csv", key_field = "name", name = "Employee", description = "useful for answering questions about the employee, their benefits and other personal information") <1> self.func = self.employee_info <2> self.employee_name = employee_name <3>
===========unchanged ref 0=========== at: lookuptool CsvLookupTool(filename: path, key_field: str, name: str="lookup", description: str="useful to look up details given an input key as opposite to searching data with an unstructured question", callbacks: Callbacks=None) at: lookuptool.CsvLookupTool data: dict[str, str] = {} __init__(self, filename: path, key_field: str, name: str="lookup", description: str="useful to look up details given an input key as opposite to searching data with an unstructured question", callbacks: Callbacks=None) ===========changed ref 0=========== <s> parts: first use GPT to see if we need more information, # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): def run(self, q: str, overrides: dict) -> any: # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple self.results = None # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) + acs_tool = Tool(name="CognitiveSearch", + func=lambda q: self.retrieve(q, overrides), + description=self.CognitiveSearchToolDescription, + callbacks=cb_manager) - acs_tool = Tool(name = "CognitiveSearch", func = lambda q: self.retrieve(q, overrides), description = self.CognitiveSearchToolDescription) + employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) - employee_tool = EmployeeInfoTool("Employee1") tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec =</s> ===========changed ref 1=========== <s>PT to see if we need more information, # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. # [1] E. Karpas, et al. 
arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): def run(self, q: str, overrides: dict) -> any: # offset: 1 <s>ai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========changed ref 2=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): + def on_agent_action( + self, + action: AgentAction, + color: Optional[str] = None, + **kwargs: Any) -> Any: + self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" + ===========changed ref 3=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_tool_start( self, serialized: Dict[str, Any], + input_str: str, - action: AgentAction, color: Optional[str] = None, **kwargs: Any, ) -> None: """Print out the log in specified color.""" + pass - self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" ===========changed ref 4=========== <s> class CsvLookupTool(Tool): + def __init__(self, filename: path, key_field: str, name: str = "lookup", + description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question", - def __init__(self, filename: path, key_field: str, name: str = "lookup", description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question"): + callbacks: Callbacks = None): + super().__init__(name, self.lookup, description, callbacks=callbacks) - super().__init__(name, self.lookup, description) - self.data = {} with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row])
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.run
Modified
Azure-Samples~azure-search-openai-demo
74d0776b127e955a739b53a8e459c4bd4085a581
Merge pull request #176 from mattmsft/mattmsft-patch-1
<9>:<add> Tool(name="Search", func=lambda q: self.search(q, overrides), description="useful for when you need to ask with search", callbacks=cb_manager), <add> Tool(name="Lookup", func=self.lookup, description="useful for when you need to ask with lookup", callbacks=cb_manager) <del> Tool(name="Search", func=lambda q: self.search(q, overrides)), <10>:<del> Tool(name="Lookup", func=self.lookup) <23>:<add> # Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links, match them with a regex to avoid <add> # generalizing too much and disrupt HTML snippets if present <add> result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result) <del> # Fix up references to they look like what the frontend expects ([] instead of ()), need a better citation format since parentheses are so common <24>:<del> result = result.replace("(", "[").replace(")", "]")
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(Approach): def run(self, q: str, overrides: dict) -> any: <0> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1> self.results = None <2> <3> # Use to capture thought process during iterations <4> cb_handler = HtmlCallbackHandler() <5> cb_manager = CallbackManager(handlers=[cb_handler]) <6> <7> llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) <8> tools = [ <9> Tool(name="Search", func=lambda q: self.search(q, overrides)), <10> Tool(name="Lookup", func=self.lookup) <11> ] <12> <13> # Like results above, not great to keep this as a global, will interfere with interleaving <14> global prompt <15> prompt_prefix = overrides.get("prompt_template") <16> prompt = PromptTemplate.from_examples( <17> EXAMPLES, SUFFIX, ["input", "agent_scratchpad"], prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX) <18> <19> agent = ReAct.from_llm_and_tools(llm, tools) <20> chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager) <21> result = chain.run(q) <22> <23> # Fix up references to they look like what the frontend expects ([] instead of ()), need a better citation format since parentheses are so common <24> result = result.replace("(", "[").replace(")", "]") <25> <26> return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} <27>
===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask EXAMPLES = [ """Question: What is the elevation range for the area that the eastern sector of the Colorado orogeny extends into? Thought: I need to search Colorado orogeny, find the area that the eastern sector of the Colorado orogeny extends into, then find the elevation range of the area. Action: Search[Colorado orogeny] Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in Colorado and surrounding areas. Thought: It does not mention the eastern sector. So I need to look up eastern sector. Action: Lookup[eastern sector] Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called the Central Plains orogeny. Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I need to search High Plains and find its elevation range. Action: Search[High Plains] Observation: <some_file.pdf> High Plains refers to one of two distinct land regions Thought: I need to instead search High Plains (United States). Action: Search[High Plains (United States)] Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130 m). Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer is 1,800 to 7,000 ft. Action: Finish[1,800 to 7,000 ft <filea.pdf>]""", """Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons" character Milhouse, who</s> ===========unchanged ref 1=========== SUFFIX = """\nQuestion: {input} {agent_scratchpad}""" PREFIX = "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " \ "Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers." \ "All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. " at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk search(q: str, overrides: dict) -> str lookup(q: str) -> str at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.openai_deployment = openai_deployment at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.search self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r] self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r] at: approaches.approach.Approach run(self, q: str, use_summaries: bool) -> any at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========unchanged ref 2=========== at: re sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) 
-> AnyStr at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): + def on_agent_action( + self, + action: AgentAction, + color: Optional[str] = None, + **kwargs: Any) -> Any: + self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" + ===========changed ref 1=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_tool_start( self, serialized: Dict[str, Any], + input_str: str, - action: AgentAction, color: Optional[str] = None, **kwargs: Any, ) -> None: """Print out the log in specified color.""" + pass - self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" ===========changed ref 2=========== <s> class CsvLookupTool(Tool): + def __init__(self, filename: path, key_field: str, name: str = "lookup", + description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question", - def __init__(self, filename: path, key_field: str, name: str = "lookup", description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question"): + callbacks: Callbacks = None): + super().__init__(name, self.lookup, description, callbacks=callbacks) - super().__init__(name, self.lookup, description) - self.data = {} with open(filename, newline='') as csvfile: reader = csv.DictReader(csvfile) for row in reader: self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row]) ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def __init__(self, employee_name: str, callbacks: Callbacks = None): - def __init__(self, employee_name: str): + super().__init__(filename="data/employeeinfo.csv", + key_field="name", + name="Employee", + description="useful for answering questions about the employee, their benefits and other personal information", + callbacks=callbacks) - super().__init__(filename = "data/employeeinfo.csv", key_field = "name", name = "Employee", description = "useful for answering questions about the employee, their benefits and other personal information") self.func = self.employee_info self.employee_name = employee_name
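The citation clean-up added in this record swaps angle-bracketed source names for square brackets with a narrow character class instead of the earlier blanket parenthesis replacement; a quick stand-alone check of the same pattern on an invented answer string:
import re

answer = "The in-network deductible is $500 <info1.pdf><Benefit Options-2.pdf> (per employee)."
fixed = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", answer)
print(fixed)
# The in-network deductible is $500 [info1.pdf][Benefit Options-2.pdf] (per employee).
# Ordinary parentheses are left untouched, unlike the old replace("(", "[") approach.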
app.backend.approaches.readretrieveread/EmployeeInfoTool.employee_info
Modified
Azure-Samples~azure-search-openai-demo
46e75e16d88a3f106f3e32bbb75d65617d0b83e3
Merge pull request #345 from pamelafox/other-way
<0>:<add> return self.lookup(name) <del> return self.lookup(self.employee_name)
# module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def employee_info(self, name: str) -> str: - def employee_info(self, unused: str) -> str: <0> return self.lookup(self.employee_name) <1>
===========changed ref 0=========== <s> information - # is present then formulate an answer. Each iteration consists of two parts: first use GPT to see if we need more information, - # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. - # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. - # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): + """ + Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information + is present then formulate an answer. Each iteration consists of two parts: + 1. use GPT to see if we need more information + 2. if more data is needed, use the requested "tool" to retrieve it. + The last call to GPT answers the actual question. + This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. + + [1] E. Karpas, et al. arXiv:2205.00445 + """ + template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example</s> ===========changed ref 1=========== <s> present then formulate an answer. Each iteration consists of two parts: first use GPT to see if we need more information, - # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. - # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. - # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): # offset: 1 <s> prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_chat_history_as_text
Modified
Azure-Samples~azure-search-openai-demo
46e75e16d88a3f106f3e32bbb75d65617d0b83e3
Merge pull request #345 from pamelafox/other-way
<2>:<add> history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text <del> history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text
<s> a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class ChatReadRetrieveReadApproach(Approach): + def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str: - def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str: <0> history_text = "" <1> for h in reversed(history if include_last_turn else history[:-1]): <2> history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text <3> if len(history_text) > approx_max_tokens*4: <4> break <5> return history_text <6>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) q = completion.choices[0].text results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] ===========unchanged ref 2=========== completion = openai.Completion.create( engine=self.chatgpt_deployment, prompt=prompt, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stop=["<|im_end|>", "<|im_start|>"]) at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. 
It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class ChatReadRetrieveReadApproach(Approach): + """ + Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves + top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion + (answer) with that prompt. + """ + prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. + Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. - Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. </s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s>Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def employee_info(self, name: str) -> str: - def employee_info(self, unused: str) -> str: + return self.lookup(name) - return self.lookup(self.employee_name)
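The history serialization above walks the conversation newest-first and stops once the accumulated text exceeds a rough four-characters-per-token budget; here is a small sketch of that budgeting idea with generic role labels (the real prompt uses the <|im_start|>/<|im_end|> ChatML markers shown in the diff).
history = [
    {"user": "What is my deductible?", "bot": "It is $500 [info1.txt]."},
    {"user": "Does that include family coverage?"},
]

def history_as_text(history, approx_max_tokens=1000):
    text = ""
    for turn in reversed(history):
        block = "user: " + turn["user"] + "\n" + "assistant: " + turn.get("bot", "") + "\n"
        text = block + text  # prepend so older turns end up first
        if len(text) > approx_max_tokens * 4:  # crude 4-characters-per-token cap
            break
    return text

print(history_as_text(history))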
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
46e75e16d88a3f106f3e32bbb75d65617d0b83e3
Merge pull request #345 from pamelafox/other-way
<1>:<add> if not blob.properties or not blob.properties.has_key("content_settings"): <add> abort(404) <4>:<add> blob_file = io.BytesIO() <add> blob.readinto(blob_file) <add> blob_file.seek(0) <add> return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) <del> return blob.readall(), 200, {"Content-Type": mime_type, "Content-Disposition": f"inline; filename={path}"}
# module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @app.route("/content/<path>") def content_file(path): <0> blob = blob_container.get_blob_client(path).download_blob() <1> mime_type = blob.properties["content_settings"]["content_type"] <2> if mime_type == "application/octet-stream": <3> mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" <4> return blob.readall(), 200, {"Content-Type": mime_type, "Content-Disposition": f"inline; filename={path}"} <5>
===========unchanged ref 0=========== at: app.backend.app blob_container = blob_client.get_container_client(AZURE_STORAGE_CONTAINER) app = Flask(__name__) ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def employee_info(self, name: str) -> str: - def employee_info(self, unused: str) -> str: + return self.lookup(name) - return self.lookup(self.employee_name) ===========changed ref 1=========== <s> a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class ChatReadRetrieveReadApproach(Approach): + def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str: - def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str: history_text = "" for h in reversed(history if include_last_turn else history[:-1]): + history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text - history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text if len(history_text) > approx_max_tokens*4: break return history_text ===========changed ref 2=========== # module: app.backend.approaches.retrievethenread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class RetrieveThenReadApproach(Approach): + """ + Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves + top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion + (answer) with that prompt. + """ + template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. " + \ """ ### Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $</s> ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class RetrieveThenReadApproach(Approach): # offset: 1 <s>ake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. ### Question: '{q}'? Sources: {retrieved} Answer: """ ===========changed ref 4=========== <s> information - # is present then formulate an answer. Each iteration consists of two parts: first use GPT to see if we need more information, - # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question. - # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. - # [1] E. Karpas, et al. arXiv:2205.00445 class ReadRetrieveReadApproach(Approach): + """ + Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information + is present then formulate an answer. Each iteration consists of two parts: + 1. use GPT to see if we need more information + 2. if more data is needed, use the requested "tool" to retrieve it. + The last call to GPT answers the actual question. + This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain. + + [1] E. Karpas, et al. arXiv:2205.00445 + """ + template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example</s>
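The MIME-type handling in content_file leans on the standard library guesser; a quick illustration of that guess plus the application/octet-stream fallback, with invented blob names:
import mimetypes

for path in ["Benefit_Options-3.pdf", "info1.txt", "README"]:
    mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    print(path, "->", mime_type)
# Benefit_Options-3.pdf -> application/pdf
# info1.txt -> text/plain
# README -> application/octet-stream  (no extension, so the fallback applies)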
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
46e75e16d88a3f106f3e32bbb75d65617d0b83e3
Merge pull request #345 from pamelafox/other-way
<1>:<add> if not request.json: <add> return jsonify({"error": "request must be json"}), 400
# module: app.backend.app @app.route("/ask", methods=["POST"]) def ask(): <0> ensure_openai_token() <1> approach = request.json["approach"] <2> try: <3> impl = ask_approaches.get(approach) <4> if not impl: <5> return jsonify({"error": "unknown approach"}), 400 <6> r = impl.run(request.json["question"], request.json.get("overrides") or {}) <7> return jsonify(r) <8> except Exception as e: <9> logging.exception("Exception in /ask") <10> return jsonify({"error": str(e)}), 500 <11>
===========unchanged ref 0=========== at: app.backend.app app = Flask(__name__) ensure_openai_token() at: app.backend.app.content_file blob = blob_container.get_blob_client(path).download_blob() at: io BytesIO(initial_bytes: bytes=...) at: io.BytesIO seek(self, offset: int, whence: int=..., /) -> int at: mimetypes guess_type(url: Union[Text, PathLike[str]], strict: bool=...) -> Tuple[Optional[str], Optional[str]] ===========changed ref 0=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @app.route("/content/<path>") def content_file(path): blob = blob_container.get_blob_client(path).download_blob() + if not blob.properties or not blob.properties.has_key("content_settings"): + abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" + blob_file = io.BytesIO() + blob.readinto(blob_file) + blob_file.seek(0) + return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) - return blob.readall(), 200, {"Content-Type": mime_type, "Content-Disposition": f"inline; filename={path}"} ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def employee_info(self, name: str) -> str: - def employee_info(self, unused: str) -> str: + return self.lookup(name) - return self.lookup(self.employee_name) ===========changed ref 2=========== <s> a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class ChatReadRetrieveReadApproach(Approach): + def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str: - def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str: history_text = "" for h in reversed(history if include_last_turn else history[:-1]): + history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text - history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text if len(history_text) > approx_max_tokens*4: break return history_text ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class RetrieveThenReadApproach(Approach): + """ + Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves + top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion + (answer) with that prompt. + """ + template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. 
" + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. " + \ """ ### Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $</s> ===========changed ref 4=========== # module: app.backend.approaches.retrievethenread - # Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves - # top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion - # (answer) with that prompt. class RetrieveThenReadApproach(Approach): # offset: 1 <s>ake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. ### Question: '{q}'? Sources: {retrieved} Answer: """
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
46e75e16d88a3f106f3e32bbb75d65617d0b83e3
Merge pull request #345 from pamelafox/other-way
<1>:<add> if not request.json: <add> return jsonify({"error": "request must be json"}), 400
# module: app.backend.app @app.route("/chat", methods=["POST"]) def chat(): <0> ensure_openai_token() <1> approach = request.json["approach"] <2> try: <3> impl = chat_approaches.get(approach) <4> if not impl: <5> return jsonify({"error": "unknown approach"}), 400 <6> r = impl.run(request.json["history"], request.json.get("overrides") or {}) <7> return jsonify(r) <8> except Exception as e: <9> logging.exception("Exception in /chat") <10> return jsonify({"error": str(e)}), 500 <11>
===========unchanged ref 0=========== at: app.backend.app ask_approaches = { "rtr": RetrieveThenReadApproach(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT), "rrr": ReadRetrieveReadApproach(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT) } app = Flask(__name__) ensure_openai_token() at: app.backend.app.ask approach = request.json["approach"] at: approaches.readdecomposeask.ReadDecomposeAsk run(q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 1=========== at: approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." run(q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: approaches.retrievethenread.RetrieveThenReadApproach template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. " + \ """ ### Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. ### Question: '{q}'? Sources: {retrieved} Answer: """ run(q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 3=========== at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.app @app.route("/ask", methods=["POST"]) def ask(): ensure_openai_token() + if not request.json: + return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: impl = ask_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 1=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. @app.route("/content/<path>") def content_file(path): blob = blob_container.get_blob_client(path).download_blob() + if not blob.properties or not blob.properties.has_key("content_settings"): + abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" + blob_file = io.BytesIO() + blob.readinto(blob_file) + blob_file.seek(0) + return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) - return blob.readall(), 200, {"Content-Type": mime_type, "Content-Disposition": f"inline; filename={path}"} ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): + def employee_info(self, name: str) -> str: - def employee_info(self, unused: str) -> str: + return self.lookup(name) - return self.lookup(self.employee_name)
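For orientation, a minimal client-side sketch of exercising the /chat route recorded above. The port, the approach key "rrr", and the override names are assumptions for illustration; only the "approach", "history", and "overrides" fields and the "answer" key appear in the record itself.

# Hypothetical request against the /chat Flask route shown above.
import requests

resp = requests.post(
    "http://localhost:5000/chat",              # assumed local Flask address
    json={
        "approach": "rrr",                     # assumed key into chat_approaches
        "history": [{"user": "What does my plan cover for eye exams?"}],
        "overrides": {"top": 3, "semantic_ranker": True},
    },
)
resp.raise_for_status()
print(resp.json()["answer"])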
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
185b0c64d9ae748e241ca253e1d681284849227a
upgrade to chat completion api
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) else: prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) # STEP 3: Generate a contextual and content specific answer using the search results and chat history completion = openai.Completion.create( engine=self.chatgpt_deployment, prompt=prompt, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stop=["<|im_end|>", "<|im_start|>"]) return {"data_points": results, "answer": completion.choices[0].text, "tho</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s>_start|>"]) return {"data_points": results, "answer": completion.choices[0].text, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. 
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" ===========unchanged ref 2=========== create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
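To make the "upgrade to chat completion api" change concrete, here is a hedged side-by-side sketch of the two call styles this record migrates between. The deployment names are placeholders; the parameters mirror those visible in the record, and this is an illustration rather than the repository code.

import openai

# Before: one prompt string with ChatML markers, sent to the Completions API.
completion = openai.Completion.create(
    engine="chatgpt-deployment",               # placeholder Azure deployment name
    prompt="<|im_start|>system\n...\n<|im_end|>\n<|im_start|>user\nHi\n<|im_end|>",
    temperature=0.7,
    max_tokens=1024,
    n=1,
    stop=["<|im_end|>", "<|im_start|>"],
)
old_answer = completion.choices[0].text

# After: structured role/content messages sent to the Chat Completions API.
chat_completion = openai.ChatCompletion.create(
    deployment_id="chatgpt-deployment",        # placeholder Azure deployment name
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "Assistant helps employees with healthcare plan questions."},
        {"role": "user", "content": "What is my deductible?"},
    ],
    temperature=0.7,
    max_tokens=1024,
    n=1,
)
new_answer = chat_completion.choices[0].message.content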
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
b21d06f347d144171f432a36f60cd4b9b4106054
update model version
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) else: prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) print("history: \n") print(history) print("prompt: \n") print(prompt) messages = self.get_messages_from_prompt(prompt) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model="gpt-3.5-turbo", messages=messages, temperature=overrides.get("temperature") or 0.7</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s>pt-3.5-turbo", messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stop=["<|im_end|>", "<|im_start|>"]) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. 
Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_prompt(prompt: str) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False ===========unchanged ref 2=========== OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
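The retrieval step (STEP 2) is shared by all of the run() variants in these records. The sketch below restates it as a standalone helper, assuming it mirrors the recorded logic; the "sourcepage" and "content" field names are placeholders standing in for the configured KB fields.

# Illustrative retrieval helper: optional semantic ranking, then flattening each
# document (or its extractive caption) into a "sourcepage: content" string.
from azure.search.documents.models import QueryType

def retrieve(search_client, query, top=3, use_semantic_ranker=True, use_semantic_captions=False):
    if use_semantic_ranker:
        r = search_client.search(
            query,
            query_type=QueryType.SEMANTIC,
            query_language="en-us",
            query_speller="lexicon",
            semantic_configuration_name="default",
            top=top,
            query_caption="extractive|highlight-false" if use_semantic_captions else None,
        )
    else:
        r = search_client.search(query, top=top)
    if use_semantic_captions:
        return [doc["sourcepage"] + ": " + " . ".join(c.text for c in doc["@search.captions"]) for doc in r]
    return [doc["sourcepage"] + ": " + doc["content"].replace("\n", " ") for doc in r]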
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
695d6901b06911771cd798ef9017afa373c05ca6
add message construction and validate tokens
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) else: prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_prompt(prompt) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model="gpt-3.5-turbo", messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1, stop=["<|im_end|>"</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s> max_tokens=1024, n=1, stop=["<|im_end|>", "<|im_start|>"]) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. 
If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ ===========unchanged ref 1=========== follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" ===========unchanged ref 2=========== create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
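A self-contained sketch of the history-to-messages construction and token-budget truncation that this commit introduces. The crude length-based token estimate below is a stand-in for the tiktoken-based counter in the real code; the rest follows the logic quoted in the record.

# Sketch: build a ChatCompletion message list from newest to oldest turns,
# dropping the oldest turns once an approximate token budget is exceeded.
def estimate_tokens(messages):
    # rough stand-in for num_tokens_from_messages (about 4 characters per token)
    return sum(len(m["content"]) // 4 for m in messages)

def build_messages(system_message, history, sources, approx_max_tokens=1000):
    messages = [{"role": "system", "content": system_message}]
    # latest user turn, with the retrieved sources appended
    messages.append({"role": "user", "content": history[-1]["user"] + "\nSources:" + sources})
    for turn in reversed(history[:-1]):
        if turn.get("bot"):
            messages.insert(1, {"role": "assistant", "content": turn["bot"]})
        messages.insert(1, {"role": "user", "content": turn["user"]})
        if estimate_tokens(messages) > approx_max_tokens * 4:
            break
    return messages

msgs = build_messages(
    "Answer only from the sources provided.",
    [{"user": "Hi", "bot": "Hello!"}, {"user": "What is my deductible?"}],
    "info1.txt: deductibles are $500 in-network.",
)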
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
e0db5fefd572d617b0ad7f7b746d25bdfa78bfa5
Address comments
<2>:<add> self.chatgpt_model = chatgpt_model
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, gpt_deployment: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.chatgpt_deployment = chatgpt_deployment <2> self.gpt_deployment = gpt_deployment <3> self.sourcepage_field = sourcepage_field <4> self.content_field = content_field <5>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.gpt_deployment = gpt_deployment at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None ===========unchanged ref 2=========== prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) completion = openai.Completion.create( engine=self.gpt_deployment, prompt=prompt, temperature=0.0, max_tokens=32, n=1, stop=["\n"]) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): #Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ - prompt_prefix = """<|im_start|>system - Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. - Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. - For tabular information return it as an html table. Do not return markdown format. - Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. 
- {follow_up_questions_prompt} - {injected_prompt} - Sources: - {sources} - <|im_end|> - {chat_history} - """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use</s>
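For context, a hedged example of constructing the approach with the new chatgpt_model argument added by this commit. The import path, index name, deployment names, and field names are placeholders; only the constructor signature comes from the record.

# Hypothetical wiring of ChatReadRetrieveReadApproach with the added chatgpt_model argument.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from approaches.chatreadretrieveread import ChatReadRetrieveReadApproach  # assumed module path

search_client = SearchClient(
    endpoint="https://<service>.search.windows.net",   # placeholder
    index_name="gptkbindex",                           # placeholder
    credential=AzureKeyCredential("<search-key>"),
)

chat_approach = ChatReadRetrieveReadApproach(
    search_client,
    chatgpt_deployment="chat",          # Azure OpenAI deployment serving the chat model (assumed name)
    chatgpt_model="gpt-3.5-turbo",      # model name, later passed to tiktoken for token counting
    gpt_deployment="davinci",           # completion deployment for the search-query step (assumed name)
    sourcepage_field="sourcepage",      # assumed field names
    content_field="content",
)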
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
e0db5fefd572d617b0ad7f7b746d25bdfa78bfa5
Address comments
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) else: prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) print(messages) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model="gpt-3.5-turbo", messages=messages, temperature=overrides.get("</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s>, model="gpt-3.5-turbo", messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach prompt_prefix = """<|im_start|>system Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} Sources: {sources} <|im_end|> {chat_history} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. 
Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str ===========unchanged ref 1=========== get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, gpt_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment + self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
e0db5fefd572d617b0ad7f7b746d25bdfa78bfa5
Address comments
<10>:<add> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <del> token_count = token_count + self.num_tokens_from_messages(messages, "gpt-3.5-turbo") <15>:<add> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <del> token_count = token_count + self.num_tokens_from_messages(messages, "gpt-3.5-turbo")
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count = token_count + self.num_tokens_from_messages(messages, "gpt-3.5-turbo") <11> <12> #latest conversation <13> userContent = history[-1]["user"] + "\nSources:" + sources <14> messages.append({"role": self.USER, "content": userContent}) <15> token_count = token_count + self.num_tokens_from_messages(messages, "gpt-3.5-turbo") <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 token_count = token_count + self.num_tokens_from_messages(messages, "gpt-3.5-turbo") if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" at: tiktoken.model encoding_for_model(model_name: str) -> Encoding at: typing Sequence = _alias(collections.abc.Sequence, 1) ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, gpt_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment + self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): #Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ - prompt_prefix = """<|im_start|>system - Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. - Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. - For tabular information return it as an html table. Do not return markdown format. - Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. - {follow_up_questions_prompt} - {injected_prompt} - Sources: - {sources} - <|im_end|> - {chat_history} - """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. 
Use</s> ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s>. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # STEP 1: Generate an optimized keyword search query based on the chat history and the last question prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) completion = openai.Completion.create( engine=self.gpt_deployment, prompt=prompt, temperature=0.0, max_tokens=32, n=1, stop=["\n"]) q = completion.choices[0].text # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query if overrides.get("semantic_ranker"): r = self.search_client.search(q, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None) else: r = self.search_client.search(q, filter=filter, top=top) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else</s>
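The ">>>" prompt-override convention used when formatting the system message can be summarised with a small helper. This is an illustrative rewrite of the branch quoted in the record, not the repository code.

# Illustrative: how a client-supplied prompt_template interacts with the built-in system prompt.
def build_system_message(template, prompt_override, follow_up_questions_prompt=""):
    if prompt_override is None:
        # no override: use the built-in prompt as-is
        return template.format(injected_prompt="",
                               follow_up_questions_prompt=follow_up_questions_prompt)
    if prompt_override.startswith(">>>"):
        # ">>>" prefix: keep the built-in prompt and inject extra instructions at the top
        return template.format(injected_prompt=prompt_override[3:] + "\n",
                               follow_up_questions_prompt=follow_up_questions_prompt)
    # anything else replaces the built-in prompt entirely
    return prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)

system_message = build_system_message(
    "Answer from Sources only.\n{follow_up_questions_prompt}\n{injected_prompt}",
    ">>>Always answer in French.",
)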
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
3a0b9c7a8363e38f4c014dabfb09292af703adce
add model validation for tiktok
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") if prompt_override is None: prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) else: prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) print(messages) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature")</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s>, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. 
Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.num_tokens_from_messages
Modified
Azure-Samples~azure-search-openai-demo
3a0b9c7a8363e38f4c014dabfb09292af703adce
add model validation for tiktok
<0>:<add> encoding = tiktoken.encoding_for_model(self.get_oai_chatmodel_tiktok(model)) <del> encoding = tiktoken.encoding_for_model(model)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def num_tokens_from_messages(self, messages, model: str): <0> encoding = tiktoken.encoding_for_model(model) <1> num_tokens = 0 <2> for message in messages: <3> num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n <4> for key, value in message.items(): <5> num_tokens += len(encoding.encode(value)) <6> if key == "name": # if there's a name, the role is omitted <7> num_tokens += -1 # role is always required and always 1 token <8> num_tokens += 2 # every reply is primed with <im_start>assistant <9> return num_tokens <10>
===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # STEP 1: Generate an optimized keyword search query based on the chat history and the last question prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) completion = openai.Completion.create( engine=self.gpt_deployment, prompt=prompt, temperature=0.0, max_tokens=32, n=1, stop=["\n"]) q = completion.choices[0].text # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query if overrides.get("semantic_ranker"): r = self.search_client.search(q, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None) else: r = self.search_client.search(q, filter=filter, top=top) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 <s>ewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") - if prompt_override is None: - prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) - elif prompt_override.startswith(">>>"): - prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) - else: - prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt) - messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) print(messages) # STEP 3: Generate a</s> ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s> content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, 
max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
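The ground truth in this record routes the model name through get_oai_chatmodel_tiktok before calling tiktoken.encoding_for_model, since tiktoken expects canonical OpenAI names (for example gpt-3.5-turbo or gpt-4) while Azure OpenAI configurations often carry deployment-style names such as gpt-35-turbo that tiktoken may not recognize. The helper's body is not shown in this record, so the sketch below is an assumed implementation; the mapping table, error message, and example model names are illustrative, not the repository's code.

import tiktoken

# Assumed mapping from Azure OpenAI model identifiers to names tiktoken knows.
# The real get_oai_chatmodel_tiktok may differ.
AOAI_TO_TIKTOKEN = {
    "gpt-35-turbo": "gpt-3.5-turbo",
    "gpt-3.5-turbo": "gpt-3.5-turbo",
    "gpt-4": "gpt-4",
}

def get_oai_chatmodel_tiktok(aoaimodel: str) -> str:
    # Fail fast on unknown or empty model names instead of letting
    # tiktoken.encoding_for_model raise a less obvious error later.
    if not aoaimodel or aoaimodel not in AOAI_TO_TIKTOKEN:
        raise ValueError(f"Expected Azure OpenAI ChatGPT model name, got: {aoaimodel!r}")
    return AOAI_TO_TIKTOKEN[aoaimodel]

encoding = tiktoken.encoding_for_model(get_oai_chatmodel_tiktok("gpt-35-turbo"))
print(len(encoding.encode("How does my plan cover eye exams?")))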
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
900eb7811bfdb60529ab969e895ead717ee3ef07
s
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) print(messages) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
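One detail of the run method in this record worth isolating is the OData filter string: a category to exclude is embedded as an OData string literal, and single quotes inside it are escaped by doubling. The snippet below reproduces just that string handling with no Azure SDK involved; the sample category values are made up.

from typing import Optional

def build_category_filter(exclude_category: Optional[str]) -> Optional[str]:
    # OData string literals escape an embedded single quote by doubling it,
    # e.g. O'Brien becomes 'O''Brien'.
    if not exclude_category:
        return None
    return "category ne '{}'".format(exclude_category.replace("'", "''"))

print(build_category_filter(None))        # None -> no filter applied
print(build_category_filter("benefits"))  # category ne 'benefits'
print(build_category_filter("O'Brien"))   # category ne 'O''Brien'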
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
0572986739f72c356bd8e7c749b08f4c51dbae5b
address cmments
<13>:<add> userContent = history[-1]["user"] + " \nSources:" + sources <del> userContent = history[-1]["user"] + "\nSources:" + sources
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <11> <12> #latest conversation <13> userContent = history[-1]["user"] + "\nSources:" + sources <14> messages.append({"role": self.USER, "content": userContent}) <15> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")}) <26> token_</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ num_tokens_from_messages(self, messages, model: str) num_tokens_from_messages(messages, model: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_model = chatgpt_model at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): #Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. 
""" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. + Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. - Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.num_tokens_from_messages
Modified
Azure-Samples~azure-search-openai-demo
0572986739f72c356bd8e7c749b08f4c51dbae5b
address cmments
<3>:<add> num_tokens += 2 # every message follows {role/name}\n{content}\n <del> num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n <8>:<del> num_tokens += 2 # every reply is primed with <im_start>assistant
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def num_tokens_from_messages(self, messages, model: str): <0> encoding = tiktoken.encoding_for_model(self.get_oai_chatmodel_tiktok(model)) <1> num_tokens = 0 <2> for message in messages: <3> num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n <4> for key, value in message.items(): <5> num_tokens += len(encoding.encode(value)) <6> if key == "name": # if there's a name, the role is omitted <7> num_tokens += -1 # role is always required and always 1 token <8> num_tokens += 2 # every reply is primed with <im_start>assistant <9> return num_tokens <10>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach get_oai_chatmodel_tiktok(aoaimodel: str) at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] at: tiktoken.model encoding_for_model(model_name: str) -> Encoding ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: messages = [] token_count = 0 if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages.append({"role":self.SYSTEM, "content": system_message}) token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) #latest conversation + userContent = history[-1]["user"] + " \nSources:" + sources - userContent = history[-1]["user"] + "\nSources:" + sources messages.append({"role": self.USER, "content": userContent}) token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) ''' Enqueue in reverse order if limit exceeds truncate old messages leaving system message behind ''' for h in reversed(history[:-1]): if h.get("bot"): messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) messages.insert(1, {"role": self.USER, "content" : h.get("user")}) token_count = token_count</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 <s>(1, {"role": self.USER, "content" : h.get("user")}) token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) if token_count > approx_max_tokens*4: break return messages ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): #Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. + Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. 
- Answer ONLY with the facts listed in the list of Sources:. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation</s> ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """
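The revised counter in this record charges the encoded length of every message field plus a small fixed per-message overhead (2 here, replacing the earlier 4 plus reply priming). The exact overhead depends on the model and chat format, so the standalone version below is only an approximation that mirrors the revised constants; it runs against a real tiktoken encoding.

import tiktoken

def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    # Approximate token count for a chat request: encoded content of each
    # field plus a small fixed per-message overhead for the role framing.
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 0
    for message in messages:
        num_tokens += 2  # per-message framing overhead (approximation)
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":  # if a name is given, the role token is omitted
                num_tokens -= 1
    return num_tokens

msgs = [{"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Does my plan cover eye exams?"}]
print(num_tokens_from_messages(msgs))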
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
5c75cadd2866503945629b07c3ee2f24f7f06a8b
Update app/backend/approaches/chatreadretrieveread.py
<10>:<add> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <del> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <11> <12> #latest conversation <13> userContent = history[-1]["user"] + " \nSources:" + sources <14> messages.append({"role": self.USER, "content": userContent}) <15> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")}) <26> token</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ num_tokens_from_messages(messages, model: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_model = chatgpt_model at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
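The truncation pattern in get_messages_from_history, shown in this and the neighbouring records, walks the older turns newest-first, inserts each one directly after the system message, and stops once a token budget is exceeded, so the system prompt and the latest user turn always survive while the oldest turns drop off. A simplified sketch follows; it swaps the tiktoken counter for a rough characters-per-token estimate and uses invented sample history, so treat it as an illustration of the pattern rather than the repository's logic.

def estimate_tokens(text: str) -> int:
    # Rough stand-in for the tiktoken-based counter: ~4 characters per token.
    return max(1, len(text) // 4)

def build_messages(system_message, history, latest_user_content, max_tokens=1000):
    # System prompt and the latest user turn are always kept; older turns are
    # inserted newest-first at index 1, so the final order is system, oldest
    # surviving turns, ..., latest user turn.
    messages = [{"role": "system", "content": system_message},
                {"role": "user", "content": latest_user_content}]
    used = sum(estimate_tokens(m["content"]) for m in messages)
    for turn in reversed(history[:-1]):
        if turn.get("bot"):
            messages.insert(1, {"role": "assistant", "content": turn["bot"]})
        messages.insert(1, {"role": "user", "content": turn["user"]})
        used += estimate_tokens(turn.get("bot", "")) + estimate_tokens(turn["user"])
        if used > max_tokens:
            break  # remaining (oldest) turns are dropped
    return messages

history = [
    {"user": "What health plans are offered?", "bot": "Northwind Health Plus and Standard [info1.txt]."},
    {"user": "Does the Plus plan cover eye exams?"},
]
for m in build_messages("You are a helpful assistant.", history,
                        "Does the Plus plan cover eye exams? \nSources: info1.txt: ..."):
    print(m["role"], "->", m["content"])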
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
929bf01ad8ed14b25a77af9d513a108b9d8df2e3
Update app/backend/approaches/chatreadretrieveread.py
<15>:<add> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <del> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <11> <12> #latest conversation <13> userContent = history[-1]["user"] + " \nSources:" + sources <14> messages.append({"role": self.USER, "content": userContent}) <15> token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")}) <26> token_count = token</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ num_tokens_from_messages(messages, model: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_model = chatgpt_model at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
501c6ca7067479f96b1aea872a954373199d3cfb
Update app/backend/approaches/chatreadretrieveread.py
<14>:<add> messages.append({"role": self.USER, "content": user_content}) <del> messages.append({"role": self.USER, "content": userContent})
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <11> <12> #latest conversation <13> userContent = history[-1]["user"] + " \nSources:" + sources <14> messages.append({"role": self.USER, "content": userContent}) <15> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")}) <26> token_count = token_count + self</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ num_tokens_from_messages(messages, model: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_model = chatgpt_model at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
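Note that the cutoff used in these get_messages_from_history records compares the running count against approx_max_tokens * 4 rather than approx_max_tokens itself. A small worked example with the default argument (the per-turn figure is illustrative):

# The loop breaks when token_count > approx_max_tokens * 4,
# so the effective budget is four times the argument value.
approx_max_tokens = 1000                  # default parameter
effective_budget = approx_max_tokens * 4  # 4000 tokens
turns_of_200_tokens = effective_budget // 200
print(effective_budget, turns_of_200_tokens)  # 4000, 20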
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
ca9cedb33e9f5bec80dc6d35e377d204ad2b96e6
Update app/backend/approaches/chatreadretrieveread.py
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chatContent, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False ===========unchanged ref 2=========== OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
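get_chat_history_as_text is only referenced by signature in this section; its body does not appear here. The sketch below is one plausible implementation consistent with that signature, rendering the turns as a plain-text transcript and trimming from the front by a rough character budget. It is an assumption for illustration, not the repository's implementation.

def get_chat_history_as_text(history, include_last_turn=True, approx_max_tokens=1000):
    # Assumed implementation: newest turns last, oldest text trimmed away once
    # the transcript exceeds roughly approx_max_tokens * 4 characters.
    turns = history if include_last_turn else history[:-1]
    lines = []
    for turn in turns:
        lines.append("user: " + turn["user"])
        if turn.get("bot"):
            lines.append("assistant: " + turn["bot"])
    text = "\n".join(lines)
    max_chars = approx_max_tokens * 4
    return text[-max_chars:] if len(text) > max_chars else text

history = [{"user": "What plans are offered?", "bot": "Northwind Health Plus and Standard."},
           {"user": "Which one covers eye exams?"}]
print(get_chat_history_as_text(history, include_last_turn=False))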
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
2249247fdb3222fa73671027d584e24005515c50
fix token estimation and address counts
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
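The below chunk in this record turns each search hit into a "sourcepage: content" line and joins them into the sources block that the prompt consumes, using a nonewlines helper that is only referenced by signature here. The standalone sketch below assumes nonewlines simply collapses line breaks; the sample documents and field names are illustrative.

def nonewlines(s: str) -> str:
    # Assumed helper: keep each source on a single line so the
    # "sourcepage: content" entries do not break across lines.
    return s.replace("\n", " ").replace("\r", " ")

def build_sources(docs, sourcepage_field="sourcepage", content_field="content"):
    results = [doc[sourcepage_field] + ": " + nonewlines(doc[content_field]) for doc in docs]
    return "\n".join(results)

docs = [
    {"sourcepage": "Benefit_Options-2.pdf", "content": "Northwind Health Plus covers\neye exams."},
    {"sourcepage": "employee_handbook-1.pdf", "content": "PTO accrues monthly."},
]
print(build_sources(docs))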
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
2249247fdb3222fa73671027d584e24005515c50
fix token estimation and address counts
<10>:<add> token_count += self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <del> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <13>:<add> user_content = history[-1]["user"] + " \nSources:" + sources <del> userContent = history[-1]["user"] + " \nSources:" + sources <15>:<add> token_count += token_count + self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <del> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <21>:<add> Keep track of token count for each conversation <add> If token count exceeds limit, break <25>:<add> token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) <26>:<add> token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) <del> token_count = token_count +
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> messages = [] <1> token_count = 0 <2> if prompt_override is None: <3> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <4> elif prompt_override.startswith(">>>"): <5> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <6> else: <7> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <8> <9> messages.append({"role":self.SYSTEM, "content": system_message}) <10> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <11> <12> #latest conversation <13> userContent = history[-1]["user"] + " \nSources:" + sources <14> messages.append({"role": self.USER, "content": user_content}) <15> token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) <16> <17> ''' <18> Enqueue in reverse order <19> if limit exceeds truncate old messages <20> leaving system message behind <21> ''' <22> for h in reversed(history[:-1]): <23> if h.get("bot"): <24> messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) <25> messages.insert(1, {"role": self.USER, "content" : h.get("user")}) <26> token_count = token_count +</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ num_tokens_from_messages(messages, model: str) at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_model = chatgpt_model at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # STEP 1: Generate an optimized keyword search query based on the chat history and the last question prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) completion = openai.Completion.create( engine=self.gpt_deployment, prompt=prompt, temperature=0.0, max_tokens=32, n=1, stop=["\n"]) q = completion.choices[0].text # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query if overrides.get("semantic_ranker"): r = self.search_client.search(q, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None) else: r = self.search_client.search(q, filter=filter, top=top) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . 
".join([c.text for c in doc['@search.captions']])) for doc in r] else</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 <s>ewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history + chat_completion = openai.ChatCompletion.create( - chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) + chat_content = chat_completion.choices[0].message.content - chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.num_tokens_from_messages
Modified
Azure-Samples~azure-search-openai-demo
2249247fdb3222fa73671027d584e24005515c50
fix token estimation and address counts
<2>:<del> for message in messages: <3>:<add> num_tokens += 2 # every message follows {role/name}\n{content}\n <del> num_tokens += 2 # every message follows {role/name}\n{content}\n <4>:<add> for key, value in message.items(): <del> for key, value in message.items(): <5>:<add> num_tokens += len(encoding.encode(value)) <del> num_tokens += len(encoding.encode(value)) <6>:<add> if key == "name": # if there's a name, the role is omitted <del> if key == "name": # if there's a name, the role is omitted <7>:<add> num_tokens += -1 # role is always required and always 1 token <del> num_tokens += -1 # role is always required and always 1 token
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): - + def num_tokens_from_messages(self, message: any, model: str): - def num_tokens_from_messages(self, messages, model: str): <0> encoding = tiktoken.encoding_for_model(self.get_oai_chatmodel_tiktok(model)) <1> num_tokens = 0 <2> for message in messages: <3> num_tokens += 2 # every message follows {role/name}\n{content}\n <4> for key, value in message.items(): <5> num_tokens += len(encoding.encode(value)) <6> if key == "name": # if there's a name, the role is omitted <7> num_tokens += -1 # role is always required and always 1 token <8> return num_tokens <9>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.get_messages_from_history messages = [] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: messages = [] token_count = 0 if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages.append({"role":self.SYSTEM, "content": system_message}) + token_count += self.num_tokens_from_messages(messages[-1], self.chatgpt_model) - token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) #latest conversation + user_content = history[-1]["user"] + " \nSources:" + sources - userContent = history[-1]["user"] + " \nSources:" + sources messages.append({"role": self.USER, "content": user_content}) + token_count += token_count + self.num_tokens_from_messages(messages[-1], self.chatgpt_model) - token_count += self.num_tokens_from_messages(messages, self.chatgpt_model) ''' Enqueue in reverse order if limit exceeds truncate old messages leaving system message behind + Keep track of token count for each conversation + If token count exceeds limit, break ''' for h in reversed</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Generate messages needed for chat Completion api def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 <s> + Keep track of token count for each conversation + If token count exceeds limit, break ''' for h in reversed(history[:-1]): if h.get("bot"): messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) + token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) messages.insert(1, {"role": self.USER, "content" : h.get("user")}) + token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) - token_count = token_count + self.num_tokens_from_messages(messages, self.chatgpt_model) if token_count > approx_max_tokens*4: break - return messages ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # STEP 1: Generate an optimized keyword search query based on the chat history and the last question prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) completion = 
openai.Completion.create( engine=self.gpt_deployment, prompt=prompt, temperature=0.0, max_tokens=32, n=1, stop=["\n"]) q = completion.choices[0].text # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query if overrides.get("semantic_ranker"): r = self.search_client.search(q, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None) else: r = self.search_client.search(q, filter=filter, top=top) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else</s> ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 <s>ewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history + chat_completion = openai.ChatCompletion.create( - chatCompletion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) + chat_content = chat_completion.choices[0].message.content - chatContent = chatCompletion.choices[0].message.content return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.num_tokens_from_messages
Modified
Azure-Samples~azure-search-openai-demo
81239a79d2eb392a78778e90b2a23269082fc58d
address comments
<0>:<add> """ <add> Calculate the number of tokens required to encode a message. <add> Args: <add> message (any): The message to encode, represented as a dictionary. <add> model (str): The name of the model to use for encoding. <add> Returns: <add> int: The total number of tokens required to encode the message. <add> Example: <add> message = {'role': 'user', 'name': 'John', 'content': 'Hello, how are you?'} <add> model = 'gpt-3.5-turbo' <add> num_tokens_from_messages(message, model) <add> output: 11 <add> """ <5>:<del> if key == "name": # if there's a name, the role is omitted <6>:<del> num_tokens += -1 # role is always required and always 1 token
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + + def num_tokens_from_messages(self, message: dict[str,str], model: str): - def num_tokens_from_messages(self, message: any, model: str): <0> encoding = tiktoken.encoding_for_model(self.get_oai_chatmodel_tiktok(model)) <1> num_tokens = 0 <2> num_tokens += 2 # every message follows {role/name}\n{content}\n <3> for key, value in message.items(): <4> num_tokens += len(encoding.encode(value)) <5> if key == "name": # if there's a name, the role is omitted <6> num_tokens += -1 # role is always required and always 1 token <7> return num_tokens <8>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. Chat History: {chat_history} Question: {question} Search query: """ at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.num_tokens_from_messages encoding = tiktoken.encoding_for_model(self.get_oai_chatmodel_tiktok(model)) at: tiktoken.core.Encoding encode(text: str, *, allowed_special: Union[Literal["all"], AbstractSet[str]]=set(), disallowed_special: Union[Literal["all"], Collection[str]]="all") -> list[int] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): - - ''' - Source: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb - Adapted: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/chatgpt?pivots=programming-language-chat-completions#managing-conversations - - Method takes in a single conversation and calculate prompt tokens - for chat api - - Keys role and content are accounted seperately. - - Values of content are encoded by model type and calculated the length. - - This gives close proximity of token length measurement used in gpt models - - message = {"role":"assistant", "content":"how can I assist you?"} - ''' -
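As an aside on the token-counting helper above: the per-message count can be reproduced with tiktoken directly. This is a minimal sketch, assuming the gpt-3.5-turbo encoding and the same flat 2-token-per-message overhead used in the method; it is not an exact reproduction of OpenAI's official accounting.

import tiktoken

# Count tokens for one chat message the same way as the helper above:
# 2 tokens of per-message overhead plus the encoded length of every value.
message = {"role": "user", "content": "Hello, how are you?"}
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
num_tokens = 2 + sum(len(encoding.encode(value)) for value in message.values())
print(num_tokens)  # the role contributes one token here, the content the rest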
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
e3dc08714d976fe1f112b5e9fdcb758dda9d619f
Merge pull request #407 from Azure-Samples/srbalakr/update-nb
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chat_completion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chat_content = chat_completion.choices[0].message.content return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. If the question is not in English, translate the question to English before generating the search query. 
Chat History: {chat_history} Question: {question} Search query: """ get_chat_history_as_text(history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False ===========unchanged ref 2=========== OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
755188cf7c41e0909904e3d81a001e39640ffe0b
Handle non-ascii filenames (#418)
<0>:<add> file_id = filename_to_id(filename) <2>:<add> "id": f"{file_id}-page-{i}", <del> "id": re.sub("[^0-9a-zA-Z_-]","_",f"{filename}-{i}"),
# module: scripts.prepdocs def create_sections(filename, page_map): <0> for i, (section, pagenum) in enumerate(split_text(page_map)): <1> yield { <2> "id": re.sub("[^0-9a-zA-Z_-]","_",f"{filename}-{i}"), <3> "content": section, <4> "category": args.category, <5> "sourcepage": blob_name_from_file_page(filename, pagenum), <6> "sourcefile": filename <7> } <8>
===========unchanged ref 0=========== at: scripts.prepdocs args = parser.parse_args() at: scripts.prepdocs.create_search_index index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) ) ===========changed ref 0=========== # module: scripts.prepdocs + def filename_to_id(filename): + filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename) + filename_hash = base64.b16encode(filename.encode('utf-8')).decode('ascii') + return f"file-{filename_ascii}-{filename_hash}" + ===========changed ref 1=========== + # module: scripts.test_prepdocs + + ===========changed ref 2=========== + # module: scripts.test_prepdocs + def test_filename_to_id(): + # test ascii filename + assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466" + # test filename containing unicode + assert filename_to_id("foo\u00A9.txt") == "file-foo__txt-666F6FC2A92E747874" + # test filenaming starting with unicode + assert filename_to_id("ファイル名.pdf") == "file-______pdf-E38395E382A1E382A4E383ABE5908D2E706466" + ===========changed ref 3=========== # module: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 - parser = argparse.ArgumentParser( - description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", - epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v" - ) - parser.add_argument("files", help="Files to be processed") - parser.add_argument("--category", help="Value for the category field in the search index for all sections indexed in this run") - parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage") - parser.add_argument("--storageaccount", help="Azure Blob Storage account name") - parser.add_argument("--container", help="Azure Blob Storage container name") - parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)") - parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)") - parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)") - parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)") - parser.add_argument("--searchkey", required=False, help="Optional. 
Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)") - parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index") - parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index") - parser.add_argument("--localpdfparser</s> ===========changed ref 4=========== # module: scripts.prepdocs # offset: 1 <s>_true", help="Remove all blobs from blob storage and documents from the search index") - parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents") - parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)") - parser.add_argument("--formrecognizerkey", required=False, help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)") - parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") - args = parser.parse_args() + if __name__ == "__main__": - # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them - azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid, process_timeout=60) - default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None - search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey) - if not args.skipblobs: - storage_creds = default_creds if args.storagekey == None else args.storagekey - if not args.localpdfparser: - # check if Azure Form Recognizer credentials are provided - if args.formrecognizerservice == None: - print("Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.") + parser = argparse.ArgumentParser( + description="Prepare documents by</s>
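The ID scheme introduced in this commit can be checked in isolation. The sketch below restates filename_to_id from the changed ref together with one of the test expectations; nothing here is new behaviour, it only illustrates why the base16 suffix keeps IDs unique when non-ASCII characters collapse to underscores.

import base64
import re

def filename_to_id(filename: str) -> str:
    # Anything outside [0-9a-zA-Z_-] is replaced so the key is valid for the search index;
    # the base16 encoding of the raw UTF-8 bytes is appended so two different non-ASCII
    # names that collapse to the same underscored slug still get distinct IDs.
    filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
    filename_hash = base64.b16encode(filename.encode("utf-8")).decode("ascii")
    return f"file-{filename_ascii}-{filename_hash}"

assert filename_to_id("foo.pdf") == "file-foo_pdf-666F6F2E706466"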
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6bfb2ccfdb0c2e15a3285465b031939a43f11eac
Migration Completion api to chat completion api (#419)
<3>:<del> self.gpt_deployment = gpt_deployment <6>:<add> self.chatgpt_token_limit = get_token_limit(chatgpt_model)
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.chatgpt_deployment = chatgpt_deployment <2> self.chatgpt_model = chatgpt_model <3> self.gpt_deployment = gpt_deployment <4> self.sourcepage_field = sourcepage_field <5> self.content_field = content_field <6>
===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. Search Query: """ query_prompt_few_shots = [ {'role' : USER, 'content' : 'What are my health plans?' }, {'role' : ASSISTANT, 'content' : 'Show available health plans' }, {'role' : USER, 'content' : 'does my plan cover cardio?' }, {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } ] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. 
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include c</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. + Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. + Search Query: + """ + query_prompt_few_shots = [ + {'role' : USER, 'content' : 'What are my health plans?' }, + {'role' : ASSISTANT, 'content' : 'Show available health plans' }, + {'role' : USER, 'content' : 'does my plan cover cardio?' }, + {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } + ] - Chat History: - {chat_history} - Question: - {question} - - Search query: - """ - ===========changed ref 2=========== + # module: app.backend.core.messagebuilder + + ===========changed ref 3=========== + # module: app.backend.core.modelhelper + + ===========changed ref 4=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + def append_message(self, role: str, content: str, index: int = 1): + self.messages.insert(index, {'role': role, 'content': content}) + self.token_length += num_tokens_from_messages( + self.messages[index], self.model) + ===========changed ref 5=========== + # module: app.backend.core.modelhelper + def get_oai_chatmodel_tiktok(aoaimodel: str) -> str: + if aoaimodel == "" or aoaimodel is None: + raise ValueError("Expected AOAI chatGPT model name") + + return AOAI_2_OAI.get(aoaimodel) + ===========changed ref 6=========== + # module: app.backend.core.modelhelper + def get_token_limit(model_id: str) -> int: + if model_id not in MODELS_2_TOKEN_LIMITS: + raise ValueError("Expected Model Gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS.get(model_id) + ===========changed ref 7=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + def __init__(self, system_content: str, chatgpt_model: str): + self.messages = [{'role': 'system', 'content': system_content}] + self.model = chatgpt_model + self.token_length = num_tokens_from_messages( + self.messages[-1], self.model) +
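The refs above use a MODELS_2_TOKEN_LIMITS table without showing it. A minimal stand-in consistent with get_token_limit's behaviour might look like the following; the concrete limit values are assumptions for illustration, not the repository's exact numbers.

# Assumed shape of the lookup behind get_token_limit(); the values are illustrative only.
MODELS_2_TOKEN_LIMITS = {
    "gpt-35-turbo": 4000,
    "gpt-3.5-turbo": 4000,
    "gpt-4": 8100,
}

def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected Model Gpt-35-turbo and above")
    return MODELS_2_TOKEN_LIMITS.get(model_id)

print(get_token_limit("gpt-35-turbo"))  # 4000 with the assumed table above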
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
6bfb2ccfdb0c2e15a3285465b031939a43f11eac
Migration Completion api to chat completion api (#419)
<5>:<add> user_q = 'Generate search query for: ' + history[-1]["user"] <add> <6>:<add> messages = self.get_messages_from_history( <add> self.query_prompt_template, <add> self.chatgpt_model, <add> history, <add> user_q, <add> self.query_prompt_few_shots, <add> self.chatgpt_token_limit - len(user_q) <add> ) <add> <del> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7>:<add> chat_completion = openai.ChatCompletion.create( <del> completion = openai.Completion.create( <8>:<add> deployment_id=self.chatgpt_deployment, <del> engine=self.gpt_deployment, <9>:<add> model=self.chatgpt_model, <add> messages=messages, <del> prompt=prompt, <12>:<add> n=1) <del> n=1, <13>:<add> <del> stop=["\n"]) <14>:<add> q = chat_completion.choices[0].message.content <del> q = completion.choices[0].text
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <6> prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"]) <7> completion = openai.Completion.create( <8> engine=self.gpt_deployment, <9> prompt=prompt, <10> temperature=0.0, <11> max_tokens=32, <12> n=1, <13> stop=["\n"]) <14> q = completion.choices[0].text <15> <16> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <17> if overrides.get("semantic_ranker"): <18> r = self.search_client.search(q, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None) <26> else: <27> r = self.search_client.search(q, filter=filter, top=top) <28> if use_semantic_captions: <29> results = [doc[self.sourcepage_field] + ": " + non</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_template") messages = self.get_messages_from_history(prompt_override=prompt_override, follow_up_questions_prompt=follow_up_questions_prompt,history=history, sources=content) # STEP 3: Generate a contextual and content specific answer using the search results and chat history chat_completion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chat_content = chat_completion.choices[0].message.content msg_to_display = '\n\n'.join([str(message) for message in messages]) return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. Search Query: """ query_prompt_few_shots = [ {'role' : USER, 'content' : 'What are my health plans?' }, {'role' : ASSISTANT, 'content' : 'Show available health plans' }, {'role' : USER, 'content' : 'does my plan cover cardio?' 
}, {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } ] ===========unchanged ref 1=========== get_messages_from_history(system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots=[], max_tokens: int=4096) -> [] get_messages_from_history(self, system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots=[], max_tokens: int=4096) -> [] get_messages_from_history(prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int=1000) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: core.modelhelper get_token_limit(model_id: str) -> int at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str ===========unchanged ref 2=========== at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model - self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field + self.chatgpt_token_limit = get_token_limit(chatgpt_model)
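To picture step 1 of the migrated run method on its own, the sketch below issues the query-generation call with few-shot turns in the style of query_prompt_few_shots; the deployment name is a placeholder, and the pre-1.0 openai Python SDK plus already-configured Azure credentials are assumed.

import openai

# openai.api_type / api_base / api_version / api_key are assumed to be configured elsewhere.
messages = [
    {"role": "system", "content": "Generate a search query based on the conversation and the new question."},
    {"role": "user", "content": "What are my health plans?"},
    {"role": "assistant", "content": "Show available health plans"},
    {"role": "user", "content": "Generate search query for: does my plan cover cardio?"},
]
chat_completion = openai.ChatCompletion.create(
    deployment_id="my-chatgpt-deployment",  # placeholder deployment name
    model="gpt-35-turbo",
    messages=messages,
    temperature=0.0,
    max_tokens=32,
    n=1)
search_query = chat_completion.choices[0].message.content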
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
6bfb2ccfdb0c2e15a3285465b031939a43f11eac
Migration Completion api to chat completion api (#419)
<0>:<del> ''' <1>:<del> Generate messages needed for chat Completion api <2>:<del> ''' <3>:<del> messages = [] <4>:<del> token_count = 0 <5>:<del> if prompt_override is None: <6>:<del> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <7>:<del> elif prompt_override.startswith(">>>"): <8>:<del> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <9>:<del> else: <10>:<del> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <11>:<add> message_builder = MessageBuilder(system_prompt, model_id) <12>:<del> messages.append({"role":self.SYSTEM, "content": system_message}) <13>:<del> token_count += self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <14>:<del> <15>:<del> # latest conversation <16>:<del> user_content = history[-1]["user"] + " \nSources:" + sources <17>:<del> messages.append({"role": self.USER, "content": user_content}) <18>:<del> token_count += token_count + self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <19>:<add> # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. <add> for shot in few_shots: <add> message_builder.append_message(shot.get('role'), shot.get('content')) <20>:<del> ''' <21>:<del> Enqueue in reverse order <22>:<del> if limit exceeds truncate old messages <23>:<del> leaving system message behind <24>:<del> Keep track of token count for each conversation <25>:<del> If token count exceeds limit, break <26>:<del> ''' <27>:<add> user_content = user_conv <add> append_index = len(few_shots) + 1 <add> <add> message_builder.append_message(self.USER, user_content, index=append_index) <add>
<s>ReadRetrieveReadApproach(Approach): + def get_messages_from_history(self, system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> []: - def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: <0> ''' <1> Generate messages needed for chat Completion api <2> ''' <3> messages = [] <4> token_count = 0 <5> if prompt_override is None: <6> system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) <7> elif prompt_override.startswith(">>>"): <8> system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) <9> else: <10> system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) <11> <12> messages.append({"role":self.SYSTEM, "content": system_message}) <13> token_count += self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <14> <15> # latest conversation <16> user_content = history[-1]["user"] + " \nSources:" + sources <17> messages.append({"role": self.USER, "content": user_content}) <18> token_count += token_count + self.num_tokens_from_messages(messages[-1], self.chatgpt_model) <19> <20> ''' <21> Enqueue in reverse order <22> if limit exceeds truncate old messages <23> leaving system message behind <24> Keep track of token count for each conversation <25> If token count exceeds limit, break <26> ''' <27> for h in reversed(history[:</s>
===========below chunk 0=========== <s>(Approach): + def get_messages_from_history(self, system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> []: - def get_messages_from_history(self, prompt_override, follow_up_questions_prompt, history: Sequence[dict[str, str]], sources: str, approx_max_tokens: int = 1000) -> []: # offset: 1 if h.get("bot"): messages.insert(1, {"role": self.ASSISTANT, "content" : h.get("bot")}) token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) messages.insert(1, {"role": self.USER, "content" : h.get("user")}) token_count += self.num_tokens_from_messages(messages[1], self.chatgpt_model) if token_count > approx_max_tokens*4: break return messages ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.run content = "\n".join(results) at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): - - def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str: - history_text = "" - for h in reversed(history if include_last_turn else history[:-1]): - history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text - if len(history_text) > approx_max_tokens*4: - break - return history_text - ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model - self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field + self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 2=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. 
It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include c</s> ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. + Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. + Search Query: + """ + query_prompt_few_shots = [ + {'role' : USER, 'content' : 'What are my health plans?' }, + {'role' : ASSISTANT, 'content' : 'Show available health plans' }, + {'role' : USER, 'content' : 'does my plan cover cardio?' }, + {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } + ] - Chat History: - {chat_history} - Question: - {question} - - Search query: - """ - ===========changed ref 4=========== + # module: app.backend.core.messagebuilder + + ===========changed ref 5=========== + # module: app.backend.core.modelhelper + +
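The history-assembly logic being replaced here (walk older turns newest-first and stop once a token budget is exhausted) can be sketched independently of MessageBuilder. The budget and the rough characters-per-token estimate below are assumptions for illustration, not the repository's tiktoken-based accounting.

def build_messages(system_message, history, latest_user_content, max_tokens=1000):
    # Crude character-based token estimate; the real code uses tiktoken per message.
    def rough_tokens(msg):
        return len(msg["content"]) // 4 + 2

    messages = [{"role": "system", "content": system_message},
                {"role": "user", "content": latest_user_content}]
    budget = max_tokens - sum(rough_tokens(m) for m in messages)

    # Insert older turns right after the system message, newest first,
    # so the oldest turns are the ones dropped when the budget runs out.
    for turn in reversed(history[:-1]):
        pair = []
        if turn.get("bot"):
            pair.append({"role": "assistant", "content": turn["bot"]})
        pair.append({"role": "user", "content": turn["user"]})
        cost = sum(rough_tokens(m) for m in pair)
        if cost > budget:
            break
        for m in pair:
            messages.insert(1, m)
        budget -= cost
    return messages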
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
6bfb2ccfdb0c2e15a3285465b031939a43f11eac
Migration Completion api to chat completion api (#419)
<2>:<add> self.chatgpt_model = chatgpt_model
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.sourcepage_field = sourcepage_field <3> self.content_field = content_field <4>
===========changed ref 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ + system_chat_template = \ - template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ + "If you cannot answer using the sources below, say you don't know. Use below example to answer" - "If you cannot answer using the sources below, say you don't know. " + \ - """ + #shots/sample conversation + question = """ - ### + 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' - Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region + """ + </s> ===========changed ref 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): # offset: 1 <s> info4.pdf: In-network institutions include Overlake, Swedish and others in the region + """ + answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." - Answer: - In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. - - ### - Question: '{q}'? 
- - Sources: - {retrieved} - - Answer: - """ - ===========changed ref 2=========== + # module: app.backend.core.messagebuilder + + ===========changed ref 3=========== + # module: app.backend.core.modelhelper + + ===========changed ref 4=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + def append_message(self, role: str, content: str, index: int = 1): + self.messages.insert(index, {'role': role, 'content': content}) + self.token_length += num_tokens_from_messages( + self.messages[index], self.model) + ===========changed ref 5=========== + # module: app.backend.core.modelhelper + def get_oai_chatmodel_tiktok(aoaimodel: str) -> str: + if aoaimodel == "" or aoaimodel is None: + raise ValueError("Expected AOAI chatGPT model name") + + return AOAI_2_OAI.get(aoaimodel) + ===========changed ref 6=========== + # module: app.backend.core.modelhelper + def get_token_limit(model_id: str) -> int: + if model_id not in MODELS_2_TOKEN_LIMITS: + raise ValueError("Expected Model Gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS.get(model_id) + ===========changed ref 7=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + def __init__(self, system_content: str, chatgpt_model: str): + self.messages = [{'role': 'system', 'content': system_content}] + self.model = chatgpt_model + self.token_length = num_tokens_from_messages( + self.messages[-1], self.model) + ===========changed ref 8=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): - def get_oai_chatmodel_tiktok(self, aoaimodel: str): - if aoaimodel == "" or aoaimodel is None: - raise Exception("Expected AOAI chatGPT model name") - - return "gpt-3.5-turbo" if aoaimodel == "gpt-35-turbo" else aoaimodel - ===========changed ref 9=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, gpt_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model - self.gpt_deployment = gpt_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field + self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 10=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + """ + A class for building and managing messages in a chat conversation. + Attributes: + message (list): A list of dictionaries representing chat messages. + model (str): The name of the ChatGPT model. + token_count (int): The total number of tokens in the conversation. + Methods: + __init__(self, system_content: str, chatgpt_model: str): Initializes the MessageBuilder instance. + append_message(self, role: str, content: str, index: int = 1): Appends a new message to the conversation. 
+ """ + ===========changed ref 11=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): - - def get_chat_history_as_text(self, history: Sequence[dict[str, str]], include_last_turn: bool=True, approx_max_tokens: int=1000) -> str: - history_text = "" - for h in reversed(history if include_last_turn else history[:-1]): - history_text = """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot", "") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text - if len(history_text) > approx_max_tokens*4: - break - return history_text -
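For illustration, a short usage sketch of the MessageBuilder pattern introduced in the refs above. Each append_message call inserts at index 1, directly after the system message, so the few-shot question/answer pair ends up in front of the real user turn. The import path is assumed to match the backend layout shown above, and note the running counter is the token_length attribute even though the class docstring calls it token_count.

from core.messagebuilder import MessageBuilder  # assumed path: app/backend/core/messagebuilder.py

builder = MessageBuilder("You are a helpful benefits assistant.", "gpt-35-turbo")
# The real user question (with retrieved sources) is appended first...
builder.append_message("user", "What is my deductible?\nSources:\ninfo1.txt: ...")
# ...then the sample answer and sample question are inserted ahead of it,
# yielding [system, shot question, shot answer, real user question].
builder.append_message("assistant", "In-network deductibles are $500 for employee [info1.txt].")
builder.append_message("user", "What is the deductible for the employee plan?")

messages = builder.messages
print(builder.token_length)  # running token count maintained by append_message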
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
6bfb2ccfdb0c2e15a3285465b031939a43f11eac
Migration Completion api to chat completion api (#419)
<22>:<add> message_builder = MessageBuilder(overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model); <add> <add> # add user question <add> user_content = q + "\n" + "Sources:\n {content}".format(content=content) <add> message_builder.append_message('user', user_content) <add> <add> # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. <add> message_builder.append_message('assistant', self.answer) <add> message_builder.append_message('user', self.question) <add> <add> messages = message_builder.messages <del> prompt = (overrides.get("prompt_template") or self.template).format(q=q, retrieved=content) <23>:<add> chat_completion = openai.ChatCompletion.create( <del> completion = openai.Completion.create( <24>:<add> deployment_id=self.openai_deployment, <del> engine=self.openai_deployment, <25>:<add> model=self.chatgpt_model, <add> messages=messages, <del> prompt=prompt, <28>:<add> n=1
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> if overrides.get("semantic_ranker"): <6> r = self.search_client.search(q, <7> filter=filter, <8> query_type=QueryType.SEMANTIC, <9> query_language="en-us", <10> query_speller="lexicon", <11> semantic_configuration_name="default", <12> top=top, <13> query_caption="extractive|highlight-false" if use_semantic_captions else None) <14> else: <15> r = self.search_client.search(q, filter=filter, top=top) <16> if use_semantic_captions: <17> results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] <18> else: <19> results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] <20> content = "\n".join(results) <21> <22> prompt = (overrides.get("prompt_template") or self.template).format(q=q, retrieved=content) <23> completion = openai.Completion.create( <24> engine=self.openai_deployment, <25> prompt=prompt, <26> temperature=overrides.get("temperature") or 0.3, <27> max_tokens=1024, <28> n=1, <29> stop=["</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 return {"data_points": results, "answer": completion.choices[0].text, "thoughts": f"Question:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. " + \ """ ### Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region Answer: In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. ### Question: '{q}'? 
Sources: {retrieved} Answer: """ at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_deployment = openai_deployment ===========unchanged ref 1=========== self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai.api_resources.completion Completion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.completion.Completion OBJECT_NAME = "completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.chatgpt_model = chatgpt_model self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ + system_chat_template = \ - template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ + "If you cannot answer using the sources below, say you don't know. Use below example to answer" - "If you cannot answer using the sources below, say you don't know. " + \ - """ + #shots/sample conversation + question = """ - ### + 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' - Question: 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region + """ + </s> ===========changed ref 2=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): # offset: 1 <s> info4.pdf: In-network institutions include Overlake, Swedish and others in the region + """ + answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." - Answer: - In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]. - - ### - Question: '{q}'? - - Sources: - {retrieved} - - Answer: - """ - ===========changed ref 3=========== + # module: app.backend.core.messagebuilder + + ===========changed ref 4=========== + # module: app.backend.core.modelhelper + + ===========changed ref 5=========== + # module: app.backend.core.messagebuilder + class MessageBuilder: + def append_message(self, role: str, content: str, index: int = 1): + self.messages.insert(index, {'role': role, 'content': content}) + self.token_length += num_tokens_from_messages( + self.messages[index], self.model) +
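The rewritten run method then sends this message list to the chat endpoint instead of a raw Completion prompt. A hedged sketch of that call with the pre-1.0 Azure OpenAI Python SDK follows; the endpoint, key, deployment and API version values are placeholders, and the call parameters mirror the ones used in the diff above.

import openai

openai.api_type = "azure"
openai.api_base = "https://<your-openai-service>.openai.azure.com"  # placeholder
openai.api_version = "2023-05-15"  # an API version that supports chat completions
openai.api_key = "<api-key>"       # placeholder

messages = [
    {"role": "system", "content": "You are an assistant answering benefits questions using only the given sources."},
    {"role": "user", "content": "What is my deductible?\nSources:\ninfo1.txt: deductibles are $500 ..."},
]

chat_completion = openai.ChatCompletion.create(
    deployment_id="chat",      # placeholder Azure deployment name
    model="gpt-35-turbo",      # model backing that deployment
    messages=messages,
    temperature=0.3,
    max_tokens=1024,
    n=1)
answer = chat_completion.choices[0].message.content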
scripts.prepdocs/create_sections
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<1>:<add> for i, (content, pagenum) in enumerate(split_text(page_map)): <del> for i, (section, pagenum) in enumerate(split_text(page_map)): <2>:<add> section = { <del> yield { <4>:<add> "content": content, <del> "content": section, <9>:<add> if use_vectors: <add> section["embedding"] = compute_embedding(content) <add> yield section
# module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): <0> file_id = filename_to_id(filename) <1> for i, (section, pagenum) in enumerate(split_text(page_map)): <2> yield { <3> "id": f"{file_id}-page-{i}", <4> "content": section, <5> "category": args.category, <6> "sourcepage": blob_name_from_file_page(filename, pagenum), <7> "sourcefile": filename <8> } <9>
===========unchanged ref 0=========== at: scripts.prepdocs blob_name_from_file_page(filename, page=0) split_text(page_map) filename_to_id(filename) args = parser.parse_args()
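When use_vectors is set, each section's content is embedded through the Azure OpenAI embeddings endpoint, and the later refs wrap that call in tenacity retries to ride out rate limiting. A small stand-alone sketch of the same pattern, assuming the openai module is already configured for the Azure endpoint as in the previous sketch and using a placeholder deployment name:

import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential

@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15))
def compute_embedding(text: str) -> list[float]:
    # "embedding" is a placeholder deployment of text-embedding-ada-002 (1536-dimensional vectors)
    return openai.Embedding.create(engine="embedding", input=text)["data"][0]["embedding"]

vector = compute_embedding("In-network deductibles are $500 for employee.")
print(len(vector))  # 1536 for text-embedding-ada-002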
scripts.prepdocs/create_search_index
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<9>:<add> SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), <add> hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, <add> vector_search_dimensions=1536, vector_search_configuration="default"), <17>:<add> title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), <del> title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) <18>:<add> vector_search=VectorSearch( <add> algorithm_configurations=[ <add> VectorSearchAlgorithmConfiguration( <add> name="default", <add> kind="hnsw", <add> hnsw_parameters=HnswParameters(metric="cosine") <add> ) <add> ] <add> ) <add> ) <del> )
# module: scripts.prepdocs def create_search_index(): <0> if args.verbose: print(f"Ensuring search index {args.index} exists") <1> index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", <2> credential=search_creds) <3> if args.index not in index_client.list_index_names(): <4> index = SearchIndex( <5> name=args.index, <6> fields=[ <7> SimpleField(name="id", type="Edm.String", key=True), <8> SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), <9> SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), <10> SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), <11> SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) <12> ], <13> semantic_settings=SemanticSettings( <14> configurations=[SemanticConfiguration( <15> name='default', <16> prioritized_fields=PrioritizedFields( <17> title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) <18> ) <19> if args.verbose: print(f"Creating {args.index} search index") <20> index_client.create_index(index) <21> else: <22> if args.verbose: print(f"Search index {args.index} already exists") <23>
===========unchanged ref 0=========== at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: scripts.prepdocs args = parser.parse_args() search_creds = default_creds if args.searchkey == None else AzureKeyCredential(args.searchkey) at: tenacity retry(stop: "StopBaseT"=stop_never, wait: "WaitBaseT"=wait_none(), retry: "RetryBaseT"=retry_if_exception_type(), before: t.Callable[["RetryCallState"], None]=before_nothing, after: t.Callable[["RetryCallState"], None]=after_nothing, before_sleep: t.Optional[t.Callable[["RetryCallState"], None]]=None, reraise: bool=False, retry_error_cls: t.Type[RetryError]=RetryError, retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]]=None, *, sleep: t.Callable[[t.Union[int, float]], None]=sleep) -> t.Any at: tenacity.stop stop_after_attempt(max_attempt_number: int) ===========unchanged ref 1=========== at: tenacity.wait wait_random_exponential(multiplier: typing.Union[int, float]=1, max: _utils.time_unit_type=_utils.MAX_WAIT, exp_base: typing.Union[int, float]=2, min: _utils.time_unit_type=0) ===========changed ref 0=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section
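The embedding field above is indexed with HNSW using the cosine metric, so candidate documents are ranked by the cosine of the angle between the query embedding and each stored embedding. A toy, dependency-free computation of that score (real vectors here are 1536-dimensional):

import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)

print(cosine_similarity([1.0, 0.0, 1.0], [0.5, 0.1, 0.9]))  # about 0.96, nearly parallel vectors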
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<2>:<add> self.embedding_deployment = embedding_deployment
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.sourcepage_field = sourcepage_field <3> self.content_field = content_field <4>
===========changed ref 0=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 1=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 2=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section ===========changed ref 3=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, + vector_search_dimensions=1536, vector_search_configuration="default"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( + title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), - title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) + vector_search=VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters=HnswParameters(metric="cosine") + ) + ] + ) + ) - ) if args.verbose: print(f"Creating {args.index} search index") index</s> ===========changed ref 4=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s> <add> ) - ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========changed ref 5=========== # module: scripts.prepdocs if __name__ == "__main__": parser = argparse.ArgumentParser( description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.", epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v" ) parser.add_argument("files", help="Files to be processed") parser.add_argument("--category", help="Value for 
the category field in the search index for all sections indexed in this run") parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage") parser.add_argument("--storageaccount", help="Azure Blob Storage account name") parser.add_argument("--container", help="Azure Blob Storage container name") parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)") parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)") parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)") parser.add_argument("--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)") + parser.add_argument("--openaiservice", help="Name of the Azure OpenAI service used to compute embeddings") + parser.add_argument("--openaideployment", help="Name of the Azure OpenAI model deployment for an embedding model ('text-embedding-ada-002' recommended)") + parser.add_argument("--novectors", action="store_true", help="Don't</s> ===========changed ref 6=========== # module: scripts.prepdocs # offset: 1 <s>-002' recommended)") + parser.add_argument("--novectors", action="store_true", help="Don't compute embeddings for the sections (e.g. don't call the OpenAI embeddings API during indexing)") + parser.add_argument("--openaikey", required=False, help="Optional. Use this Azure OpenAI account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index") parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index") parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents") parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)") parser.add_argument("--formrecognizerkey", required=False, help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)") parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") args = parser.parse_args() # Use the current user identity to connect to Azure services unless a key is explicitly set for any of them azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(tenant_id=args.tenantid, process_timeout=60) default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None search_creds = default_creds if args.searchkey</s>
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.retrieve
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<0>:<add> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <add> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <add> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <del> use_semantic_captions = True if overrides.get("semantic_captions") else False <5>:<add> # If retrieval mode includes vectors, compute an embedding for the query <add> if has_vector: <add> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <add> else: <add> query_vector = None <add> <add> # Only keep the text query if the retrieval mode uses text, otherwise drop it <add> if not has_text: <add> query_text = None <add> <add> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <add> if overrides.get("semantic_ranker") and has_text: <del> if overrides.get("semantic_ranker"): <6>:<add> r = self.search_client
# module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, q: str, overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> if overrides.get("semantic_ranker"): <6> r = self.search_client.search(q, <7> filter=filter, <8> query_type=QueryType.SEMANTIC, <9> query_language="en-us", <10> query_speller="lexicon", <11> semantic_configuration_name="default", <12> top = top, <13> query_caption="extractive|highlight-false" if use_semantic_captions else None) <14> else: <15> r = self.search_client.search(q, filter=filter, top=top) <16> if use_semantic_captions: <17> self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r] <18> else: <19> self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for doc in r] <20> content = "\n".join(self.results) <21> return content <22>
===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment ===========unchanged ref 1=========== at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 1=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 2=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 3=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def 
create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section ===========changed ref 4=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, + vector_search_dimensions=1536, vector_search_configuration="default"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( + title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), - title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) + vector_search=VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters=HnswParameters(metric="cosine") + ) + ] + ) + ) - ) if args.verbose: print(f"Creating {args.index} search index") index</s> ===========changed ref 5=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s> <add> ) - ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists")
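The retrieve change above introduces the retrieval_mode handling that recurs in the other approaches: "text", "vectors" or "hybrid" (the default when no mode is supplied) decides whether a keyword query, a query embedding, or both are sent to Cognitive Search, and semantic captions are only kept when a text query exists. A condensed sketch of just that decision logic, with the search call left out and the embedder stubbed:

def plan_retrieval(overrides: dict, query_text: str, embed=lambda text: None):
    has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
    has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
    # Captions/semantic ranking only apply when there is a text query to rerank.
    use_semantic_captions = bool(overrides.get("semantic_captions")) and has_text
    query_vector = embed(query_text) if has_vector else None
    return (query_text if has_text else None), query_vector, use_semantic_captions

# No override means hybrid: both the text query and (with a real embedder) a vector are used.
print(plan_retrieval({}, "health plan deductible"))
# ('health plan deductible', None, False) with the stub embedder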
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<3>:<add> self.embedding_deployment = embedding_deployment
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.chatgpt_deployment = chatgpt_deployment <2> self.chatgpt_model = chatgpt_model <3> self.sourcepage_field = sourcepage_field <4> self.content_field = content_field <5> self.chatgpt_token_limit = get_token_limit(chatgpt_model) <6>
===========unchanged ref 0=========== at: core.modelhelper get_token_limit(model_id: str) -> int ===========changed ref 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # Chat roles SYSTEM = "system" USER = "user" ASSISTANT = "assistant" """ Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. + For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. - For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. + Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. - Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. + Try not to repeat questions that have already been asked. - Try not to repeat questions that have already been asked. + Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'</s> ===========changed ref 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): # offset: 1 <s> have already been asked. + Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" - Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. + Generate a search query based on the conversation and the new question. - Generate a search query based on the conversation and the new question. + Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. - Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. + Do not include any text inside [] or <<>> in the search query terms. - Do not include any text inside [] or <<>> in the search query terms. + Do not include any special characters like '+'. - Do not include any special characters like '+'. 
+ If the question is not in English, translate the question to English before generating the search query. - If the question is not in English, translate the question to English before generating the search query. - - Search Query: + If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {'role' : USER, 'content' : 'What are my health plans?' }, {'role' : ASSISTANT, 'content' : 'Show available health plans' }, {'role' : USER, 'content' : 'does my plan cover cardio?' }, {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } ] ===========changed ref 2=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 3=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 4=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 5=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section
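chatgpt_token_limit is resolved through get_token_limit, which looks the model up in a MODELS_2_TOKEN_LIMITS table in app/backend/core/modelhelper.py. That table is not shown in this excerpt, so the figures in the sketch below are illustrative assumptions (commonly quoted context sizes), not the repo's exact values:

# Hypothetical table; the real MODELS_2_TOKEN_LIMITS lives in core/modelhelper.py
MODELS_2_TOKEN_LIMITS = {
    "gpt-35-turbo": 4000,
    "gpt-3.5-turbo": 4000,
    "gpt-35-turbo-16k": 16000,
    "gpt-4": 8100,
    "gpt-4-32k": 32000,
}

def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected Model Gpt-35-turbo and above")
    return MODELS_2_TOKEN_LIMITS[model_id]

chatgpt_token_limit = get_token_limit("gpt-35-turbo")  # 4000 with the illustrative table above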
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<0>:<add> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <add> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <add> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <del> use_semantic_captions = True if overrides.get("semantic_captions") else False <25>:<add> query_text = chat_completion.choices[0].message.content <del> q = chat_completion.choices[0].message.content <26>:<add> if query_text.strip() == "0": <add> query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query <28>:<add> <add> # If retrieval mode includes vectors, compute an embedding for the query <add> if has_vector: <add> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <add> else: <add> query_vector = None <add> <add> # Only keep
# module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> user_q = 'Generate search query for: ' + history[-1]["user"] <6> <7> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <8> messages = self.get_messages_from_history( <9> self.query_prompt_template, <10> self.chatgpt_model, <11> history, <12> user_q, <13> self.query_prompt_few_shots, <14> self.chatgpt_token_limit - len(user_q) <15> ) <16> <17> chat_completion = openai.ChatCompletion.create( <18> deployment_id=self.chatgpt_deployment, <19> model=self.chatgpt_model, <20> messages=messages, <21> temperature=0.0, <22> max_tokens=32, <23> n=1) <24> <25> q = chat_completion.choices[0].message.content <26> <27> # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query <28> if overrides.get("semantic_ranker"): <29> r = self.search_client.search(q, <30> filter=filter, <31> query_type=QueryType.SEMANTIC, <32> query_language="en-us", <33> query_speller="lexicon", <34> semantic_configuration_name="default", <35> top=top, <36> query_caption="extractive</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 else: r = self.search_client.search(q, filter=filter, top=top) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_override") if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) # latest conversation user_content = history[-1]["user"] + " \nSources:" + content messages = self.get_messages_from_history( system_message, self.chatgpt_model, history, user_content, max_tokens=self.chatg</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s> self.chatgpt_model, history, user_content, max_tokens=self.chatgpt_token_limit) chat_completion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chat_content = chat_completion.choices[0].message.content msg_to_display = '\n\n'.join([str(message) for message in messages]) return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{q}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. 
{follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. Search Query: """ query_prompt_few_shots = [ {'role' : USER, 'content' : 'What are my health plans?' }, {'role' : ASSISTANT, 'content' : 'Show available health plans' }, {'role' : USER, 'content' : 'does my plan cover cardio?' }, {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } ] get_messages_from_history(system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots=[], max_tokens: int=4096) -> [] at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach.__init__ self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing Sequence = _alias(collections.abc.Sequence, 1) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.__init__
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<3>:<add> self.embedding_deployment = embedding_deployment
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.chatgpt_model = chatgpt_model <3> self.sourcepage_field = sourcepage_field <4> self.content_field = content_field <5>
===========changed ref 0=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 1=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 3=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 4=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section ===========changed ref 5=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, + vector_search_dimensions=1536, vector_search_configuration="default"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( 
configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( + title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), - title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) + vector_search=VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters=HnswParameters(metric="cosine") + ) + ] + ) + ) - ) if args.verbose: print(f"Creating {args.index} search index") index</s> ===========changed ref 6=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s> <add> ) - ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists") ===========changed ref 7=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, q: str, overrides: dict[str, Any]) -> Any: + has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] + has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] + use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False - use_semantic_captions = True if overrides.get("semantic_captions") else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None + # If retrieval mode includes vectors, compute an embedding for the query + if has_vector: + query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] + else: + query_vector = None + + # Only keep the text query if the retrieval mode uses text, otherwise drop it + if not has_text: + query_text = None + + # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) + if overrides.get("semantic_ranker") and has_text: - if overrides.get("semantic_ranker"): + r = self.search_client.search(query_text, - r = self.search_client.search(q, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top = top, + </s>
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<0>:<add> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <add> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <add> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <del> use_semantic_captions = True if overrides.get("semantic_captions") else False <5>:<add> # If retrieval mode includes vectors, compute an embedding for the query <add> if has_vector: <add> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=q)["data"][0]["embedding"] <add> else: <add> query_vector = None <add> <add> # Only keep the text query if the retrieval mode uses text, otherwise drop it <add> query_text = q if has_text else None <add> <add> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <add> if overrides.get("semantic_ranker") and has_text: <del> if overrides.get("semantic_ranker"): <6>:<add> r = self.search_client.search(query_
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> if overrides.get("semantic_ranker"): <6> r = self.search_client.search(q, <7> filter=filter, <8> query_type=QueryType.SEMANTIC, <9> query_language="en-us", <10> query_speller="lexicon", <11> semantic_configuration_name="default", <12> top=top, <13> query_caption="extractive|highlight-false" if use_semantic_captions else None) <14> else: <15> r = self.search_client.search(q, filter=filter, top=top) <16> if use_semantic_captions: <17> results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] <18> else: <19> results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] <20> content = "\n".join(results) <21> <22> message_builder = MessageBuilder(overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model); <23> <24> # add user question <25> user_content = q + "\n" + "Sources:\n {content}".format(content=content) <26> message_builder.append_message('user', user_content) <27> <28> # Add shots/samples. This helps model to</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 message_builder.append_message('assistant', self.answer) message_builder.append_message('user', self.question) messages = message_builder.messages chat_completion = openai.ChatCompletion.create( deployment_id=self.openai_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1) return {"data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{q}<br><br>Prompt:<br>" + '\n\n'.join([str(message) for message in messages])} ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. Use below example to answer" question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." 
at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model ===========unchanged ref 1=========== self.sourcepage_field = sourcepage_field self.content_field = content_field at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: core.messagebuilder.MessageBuilder append_message(role: str, content: str, index: int=1) at: core.messagebuilder.MessageBuilder.__init__ self.messages = [{'role': 'system', 'content': system_content}] at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 1=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 2=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: 
str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model)
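=========== illustrative sketch ===========
The approach records above all add the same retrieval_mode handling. A minimal, self-contained sketch of just that logic, not part of any record: build_query is a made-up helper name, while the openai.Embedding.create(engine=..., input=...) call and the mode checks are taken verbatim from the diffs (pre-1.0 openai SDK assumed).

import openai


def build_query(q: str, overrides: dict, embedding_deployment: str):
    # "text", "vectors" or "hybrid"; a missing value behaves like hybrid.
    has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
    has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]

    query_vector = None
    if has_vector:
        # Same embedding call the approach classes use; the Azure deployment name is passed as engine.
        query_vector = openai.Embedding.create(engine=embedding_deployment, input=q)["data"][0]["embedding"]

    # Keep the text query only when the retrieval mode uses text.
    query_text = q if has_text else None
    return query_text, query_vector

Both values are then handed to search_client.search(...), and the semantic ranker is only requested when text is part of the query, as the ground truths above show.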
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.__init__
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<2>:<add> self.embedding_deployment = embedding_deployment
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): <0> self.search_client = search_client <1> self.openai_deployment = openai_deployment <2> self.sourcepage_field = sourcepage_field <3> self.content_field = content_field <4>
===========changed ref 0=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 1=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 3=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 4=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 5=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section ===========changed ref 6=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", 
type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, + vector_search_dimensions=1536, vector_search_configuration="default"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( + title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), - title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) + vector_search=VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters=HnswParameters(metric="cosine") + ) + ] + ) + ) - ) if args.verbose: print(f"Creating {args.index} search index") index</s> ===========changed ref 7=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s> <add> ) - ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists")
app.backend.approaches.readdecomposeask/ReadDecomposeAsk.search
Modified
Azure-Samples~azure-search-openai-demo
85791db8b36483aaca35cb288e20706391323b14
Vector search (#424)
<0>:<add> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <add> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <add> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <del> use_semantic_captions = True if overrides.get("semantic_captions") else False <5>:<add> # If retrieval mode includes vectors, compute an embedding for the query <add> if has_vector: <add> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <add> else: <add> query_vector = None <add> <add> # Only keep the text query if the retrieval mode uses text, otherwise drop it <add> if not has_text: <add> query_text = None <add> <add> if overrides.get("semantic_ranker") and has_text: <del> if overrides.get("semantic_ranker"): <6>:<add> r = self.search_client.search(query_text, <del> r = self.search_client.search(q, <12>:<add> top=top,
# module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(Approach): + def search(self, query_text: str, overrides: dict[str, Any]) -> str: - def search(self, q: str, overrides: dict[str, Any]) -> str: <0> use_semantic_captions = True if overrides.get("semantic_captions") else False <1> top = overrides.get("top") or 3 <2> exclude_category = overrides.get("exclude_category") or None <3> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <4> <5> if overrides.get("semantic_ranker"): <6> r = self.search_client.search(q, <7> filter=filter, <8> query_type=QueryType.SEMANTIC, <9> query_language="en-us", <10> query_speller="lexicon", <11> semantic_configuration_name="default", <12> top = top, <13> query_caption="extractive|highlight-false" if use_semantic_captions else None) <14> else: <15> r = self.search_client.search(q, filter=filter, top=top) <16> if use_semantic_captions: <17> self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r] <18> else: <19> self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r] <20> return "\n".join(self.results) <21>
===========unchanged ref 0=========== at: app.backend.approaches.readdecomposeask.ReadDecomposeAsk.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.readdecomposeask class ReadDecomposeAsk(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 1=========== # module: scripts.prepdocs + def before_retry_sleep(retry_state): + if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") + ===========changed ref 2=========== # module: scripts.prepdocs + @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep) + def compute_embedding(text): + return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"] + ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread class ReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 4=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): + def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, openai_deployment: str, chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.openai_deployment = openai_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field ===========changed ref 5=========== # module: app.backend.approaches.chatreadretrieveread class ChatReadRetrieveReadApproach(Approach): + def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str): - def __init__(self, search_client: SearchClient, chatgpt_deployment: str, 
chatgpt_model: str, sourcepage_field: str, content_field: str): self.search_client = search_client self.chatgpt_deployment = chatgpt_deployment self.chatgpt_model = chatgpt_model + self.embedding_deployment = embedding_deployment self.sourcepage_field = sourcepage_field self.content_field = content_field self.chatgpt_token_limit = get_token_limit(chatgpt_model) ===========changed ref 6=========== # module: scripts.prepdocs + def create_sections(filename, page_map, use_vectors): - def create_sections(filename, page_map): file_id = filename_to_id(filename) + for i, (content, pagenum) in enumerate(split_text(page_map)): - for i, (section, pagenum) in enumerate(split_text(page_map)): + section = { - yield { "id": f"{file_id}-page-{i}", + "content": content, - "content": section, "category": args.category, "sourcepage": blob_name_from_file_page(filename, pagenum), "sourcefile": filename } + if use_vectors: + section["embedding"] = compute_embedding(content) + yield section ===========changed ref 7=========== # module: scripts.prepdocs def create_search_index(): if args.verbose: print(f"Ensuring search index {args.index} exists") index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds) if args.index not in index_client.list_index_names(): index = SearchIndex( name=args.index, fields=[ SimpleField(name="id", type="Edm.String", key=True), SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"), + SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + hidden=False, searchable=True, filterable=False, sortable=False, facetable=False, + vector_search_dimensions=1536, vector_search_configuration="default"), SimpleField(name="category", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True), SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True) ], semantic_settings=SemanticSettings( configurations=[SemanticConfiguration( name='default', prioritized_fields=PrioritizedFields( + title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]), - title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]) + vector_search=VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters=HnswParameters(metric="cosine") + ) + ] + ) + ) - ) if args.verbose: print(f"Creating {args.index} search index") index</s> ===========changed ref 8=========== # module: scripts.prepdocs def create_search_index(): # offset: 1 <s> <add> ) - ) if args.verbose: print(f"Creating {args.index} search index") index_client.create_index(index) else: if args.verbose: print(f"Search index {args.index} already exists")
app.backend.core.modelhelper/get_token_limit
Modified
Azure-Samples~azure-search-openai-demo
9b4b65bab522ad0c9a1e445f6b10e3cc6daac491
Add more tests for core/ folder (#430)
<1>:<add> raise ValueError("Expected model gpt-35-turbo and above") <del> raise ValueError("Expected Model Gpt-35-turbo and above")
# module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: <0> if model_id not in MODELS_2_TOKEN_LIMITS: <1> raise ValueError("Expected Model Gpt-35-turbo and above") <2> return MODELS_2_TOKEN_LIMITS.get(model_id) <3>
===========unchanged ref 0=========== at: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000 } at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
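=========== illustrative sketch ===========
A standalone sketch of the behavior after this commit, using the MODELS_2_TOKEN_LIMITS mapping quoted in the unchanged ref (only the error message wording changed here):

MODELS_2_TOKEN_LIMITS = {
    "gpt-35-turbo": 4000,
    "gpt-3.5-turbo": 4000,
    "gpt-35-turbo-16k": 16000,
    "gpt-3.5-turbo-16k": 16000,
    "gpt-4": 8100,
    "gpt-4-32k": 32000,
}


def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected model gpt-35-turbo and above")
    return MODELS_2_TOKEN_LIMITS[model_id]


assert get_token_limit("gpt-4") == 8100  # known model returns its token limit
# get_token_limit("text-davinci-003") raises ValueError("Expected model gpt-35-turbo and above")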
app.backend.core.modelhelper/get_oai_chatmodel_tiktok
Modified
Azure-Samples~azure-search-openai-demo
9b4b65bab522ad0c9a1e445f6b10e3cc6daac491
Add more tests for core/ folder (#430)
<0>:<add> message = "Expected Azure OpenAI ChatGPT model name" <1>:<add> raise ValueError(message) <add> if aoaimodel not in AOAI_2_OAI and aoaimodel not in MODELS_2_TOKEN_LIMITS: <add> raise ValueError(message) <add> return AOAI_2_OAI.get(aoaimodel) or aoaimodel <del> raise ValueError("Expected AOAI chatGPT model name") <3>:<del> return AOAI_2_OAI.get(aoaimodel) <4>:<del>
# module: app.backend.core.modelhelper def get_oai_chatmodel_tiktok(aoaimodel: str) -> str: <0> if aoaimodel == "" or aoaimodel is None: <1> raise ValueError("Expected AOAI chatGPT model name") <2> <3> return AOAI_2_OAI.get(aoaimodel) <4>
===========unchanged ref 0=========== at: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000 } AOAI_2_OAI = { "gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k" } ===========changed ref 0=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: + raise ValueError("Expected model gpt-35-turbo and above") - raise ValueError("Expected Model Gpt-35-turbo and above") return MODELS_2_TOKEN_LIMITS.get(model_id)
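=========== illustrative sketch ===========
The stricter validation added in this record can be exercised on its own; the mappings below are abridged from the refs above, everything else mirrors the ground truth:

MODELS_2_TOKEN_LIMITS = {"gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-4": 8100}  # abridged
AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k"}


def get_oai_chatmodel_tiktok(aoaimodel: str) -> str:
    message = "Expected Azure OpenAI ChatGPT model name"
    if aoaimodel == "" or aoaimodel is None:
        raise ValueError(message)
    if aoaimodel not in AOAI_2_OAI and aoaimodel not in MODELS_2_TOKEN_LIMITS:
        raise ValueError(message)
    return AOAI_2_OAI.get(aoaimodel) or aoaimodel


assert get_oai_chatmodel_tiktok("gpt-35-turbo") == "gpt-3.5-turbo"  # Azure name maps to OpenAI name
assert get_oai_chatmodel_tiktok("gpt-4") == "gpt-4"                 # already an OpenAI name, passes through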
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
52abf79fdf545fc29c5c9159b88a6ff9010f4ed2
Ensure openai token (#262)
<0>:<del> ensure_openai_token()
# module: app.backend.app @app.route("/ask", methods=["POST"]) def ask(): <0> ensure_openai_token() <1> if not request.json: <2> return jsonify({"error": "request must be json"}), 400 <3> approach = request.json["approach"] <4> try: <5> impl = ask_approaches.get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> r = impl.run(request.json["question"], request.json.get("overrides") or {}) <9> return jsonify(r) <10> except Exception as e: <11> logging.exception("Exception in /ask") <12> return jsonify({"error": str(e)}), 500 <13>
===========unchanged ref 0=========== at: app.backend.app ask_approaches = { "rtr": RetrieveThenReadApproach(search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT), "rrr": ReadRetrieveReadApproach(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT), "rda": ReadDecomposeAsk(search_client, AZURE_OPENAI_GPT_DEPLOYMENT, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT) } app = Flask(__name__) at: approaches.readdecomposeask.ReadDecomposeAsk run(q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 1=========== at: approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." run(q: str, overrides: dict[str, Any]) -> Any ===========unchanged ref 2=========== at: approaches.retrievethenread.RetrieveThenReadApproach system_chat_template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. Use below example to answer" question = """ 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. 
info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." run(q: str, overrides: dict[str, Any]) -> Any at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========unchanged ref 3=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co]
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
52abf79fdf545fc29c5c9159b88a6ff9010f4ed2
Ensure openai token (#262)
<0>:<del> ensure_openai_token()
# module: app.backend.app @app.route("/chat", methods=["POST"]) def chat(): <0> ensure_openai_token() <1> if not request.json: <2> return jsonify({"error": "request must be json"}), 400 <3> approach = request.json["approach"] <4> try: <5> impl = chat_approaches.get(approach) <6> if not impl: <7> return jsonify({"error": "unknown approach"}), 400 <8> r = impl.run(request.json["history"], request.json.get("overrides") or {}) <9> return jsonify(r) <10> except Exception as e: <11> logging.exception("Exception in /chat") <12> return jsonify({"error": str(e)}), 500 <13>
===========unchanged ref 0=========== at: app.backend.app chat_approaches = { "rrr": ChatReadRetrieveReadApproach(search_client, AZURE_OPENAI_CHATGPT_DEPLOYMENT, AZURE_OPENAI_CHATGPT_MODEL, AZURE_OPENAI_EMB_DEPLOYMENT, KB_FIELDS_SOURCEPAGE, KB_FIELDS_CONTENT) } app = Flask(__name__) at: approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'""" ===========unchanged ref 1=========== query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook. Generate a search query based on the conversation and the new question. Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms. Do not include any text inside [] or <<>> in the search query terms. Do not include any special characters like '+'. If the question is not in English, translate the question to English before generating the search query. If you cannot generate a search query, return just the number 0. """ query_prompt_few_shots = [ {'role' : USER, 'content' : 'What are my health plans?' }, {'role' : ASSISTANT, 'content' : 'Show available health plans' }, {'role' : USER, 'content' : 'does my plan cover cardio?' 
}, {'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' } ] run(history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.app @app.route("/ask", methods=["POST"]) def ask(): - ensure_openai_token() if not request.json: return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: impl = ask_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500
app.backend.app/ensure_openai_token
Modified
Azure-Samples~azure-search-openai-demo
52abf79fdf545fc29c5c9159b88a6ff9010f4ed2
Ensure openai token (#262)
<1>:<add> if openai_token.expires_on < time.time() + 60: <del> if openai_token.expires_on < int(time.time()) - 60:
# module: app.backend.app + @app.before_request def ensure_openai_token(): <0> global openai_token <1> if openai_token.expires_on < int(time.time()) - 60: <2> openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default") <3> openai.api_key = openai_token.token <4>
===========unchanged ref 0=========== at: app.backend.app azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) at: openai api_key = os.environ.get("OPENAI_API_KEY") at: time time() -> float ===========changed ref 0=========== # module: app.backend.app @app.route("/chat", methods=["POST"]) def chat(): - ensure_openai_token() if not request.json: return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: impl = chat_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["history"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500 ===========changed ref 1=========== # module: app.backend.app @app.route("/ask", methods=["POST"]) def ask(): - ensure_openai_token() if not request.json: return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: impl = ask_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500
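=========== illustrative sketch ===========
Two things change in this record: the function becomes an @app.before_request hook, which is why the explicit ensure_openai_token() calls disappear from /ask and /chat above, and the refresh condition is fixed. A small sketch of why the comparison flipped (the timestamp below is illustrative):

import time


def needs_refresh_old(expires_on: float) -> bool:
    return expires_on < int(time.time()) - 60  # old: token already expired more than 60s ago


def needs_refresh_new(expires_on: float) -> bool:
    return expires_on < time.time() + 60       # new: token expires within the next 60s


soon_to_expire = time.time() + 30              # token valid for only 30 more seconds
assert needs_refresh_new(soon_to_expire)       # new check refreshes in time
assert not needs_refresh_old(soon_to_expire)   # old check would have kept the stale token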
scripts.prepdocs/remove_blobs
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<4>:<add> if filename is None: <del> if filename == None:
# module: scripts.prepdocs def remove_blobs(filename): <0> if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") <1> blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) <2> blob_container = blob_service.get_container_client(args.container) <3> if blob_container.exists(): <4> if filename == None: <5> blobs = blob_container.list_blob_names() <6> else: <7> prefix = os.path.splitext(os.path.basename(filename))[0] <8> blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) <9> for b in blobs: <10> if args.verbose: print(f"\tRemoving blob {b}") <11> blob_container.delete_blob(b) <12>
===========unchanged ref 0=========== at: os.path splitext(p: AnyStr) -> Tuple[AnyStr, AnyStr] splitext(p: _PathLike[AnyStr]) -> Tuple[AnyStr, AnyStr] basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: re match(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] match(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]] at: scripts.prepdocs args = parser.parse_args() storage_creds = default_creds if args.storagekey is None else args.storagekey
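=========== illustrative sketch ===========
For reference, the regex above only selects the per-page blobs derived from the given source file. A small sketch with a hypothetical file name ("Benefit_Options.pdf" is just an example):

import os
import re


def matches_source_file(blob_name: str, filename: str) -> bool:
    prefix = os.path.splitext(os.path.basename(filename))[0]
    return re.match(rf"{prefix}-\d+\.pdf", blob_name) is not None


assert matches_source_file("Benefit_Options-3.pdf", "data/Benefit_Options.pdf")    # split-page blob, matched
assert not matches_source_file("Benefit_Options.pdf", "data/Benefit_Options.pdf")  # no page suffix, not matched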
scripts.prepdocs/get_document_text
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
# module: scripts.prepdocs def get_document_text(filename): <0> offset = 0 <1> page_map = [] <2> if args.localpdfparser: <3> reader = PdfReader(filename) <4> pages = reader.pages <5> for page_num, p in enumerate(pages): <6> page_text = p.extract_text() <7> page_map.append((page_num, offset, page_text)) <8> offset += len(page_text) <9> else: <10> if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") <11> form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) <12> with open(filename, "rb") as f: <13> poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) <14> form_recognizer_results = poller.result() <15> <16> for page_num, page in enumerate(form_recognizer_results.pages): <17> tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] <18> <19> # mark all positions of the table spans in the page <20> page_offset = page.spans[0].offset <21> page_length = page.spans[0].length <22> table_chars = [-1]*page_length <23> for table_id, table in enumerate(tables_on_page): <24> for span in table.spans: <25> # replace all table spans with "table_id" in table_chars array <26> for i in range(span.length): <27> idx = span.offset - page_offset + i <28> if idx >=0 and idx < page_length: <29> table_chars[idx] = table_id <30> <31> </s>
===========below chunk 0=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] elif not table_id in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map ===========unchanged ref 0=========== at: scripts.prepdocs table_to_html(table) args = parser.parse_args() formrecognizer_creds = default_creds if args.formrecognizerkey is None else AzureKeyCredential(args.formrecognizerkey) ===========changed ref 0=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b)
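=========== illustrative sketch ===========
The table handling in get_document_text is easiest to see on a toy input. A simplified sketch, with plain (offset, length) tuples standing in for Form Recognizer spans and no page_offset handling:

def rebuild_page(content, table_spans, table_html):
    # Tag every character position covered by a table span with that table's index.
    table_chars = [-1] * len(content)
    for table_id, (offset, length) in enumerate(table_spans):
        for i in range(offset, offset + length):
            table_chars[i] = table_id

    # Rebuild the text, emitting each table's HTML exactly once in place of its span.
    page_text, added_tables = "", set()
    for idx, table_id in enumerate(table_chars):
        if table_id == -1:
            page_text += content[idx]
        elif table_id not in added_tables:
            page_text += table_html[table_id]
            added_tables.add(table_id)
    return page_text


# Positions 4-7 belong to table 0, so they collapse into a single <table> element:
assert rebuild_page("0123TTTT89", [(4, 4)], ["<table>...</table>"]) == "0123<table>...</table>89"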
scripts.prepdocs/split_text
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<5>:<add> num_pages = len(page_map) <del> l = len(page_map) <6>:<add> for i in range(num_pages - 1): <del> for i in range(l - 1): <9>:<add> return num_pages - 1 <del> return l - 1
# module: scripts.prepdocs def split_text(page_map): <0> SENTENCE_ENDINGS = [".", "!", "?"] <1> WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] <2> if args.verbose: print(f"Splitting '{filename}' into sections") <3> <4> def find_page(offset): <5> l = len(page_map) <6> for i in range(l - 1): <7> if offset >= page_map[i][1] and offset < page_map[i + 1][1]: <8> return i <9> return l - 1 <10> <11> all_text = "".join(p[2] for p in page_map) <12> length = len(all_text) <13> start = 0 <14> end = length <15> while start + SECTION_OVERLAP < length: <16> last_word = -1 <17> end = start + MAX_SECTION_LENGTH <18> <19> if end > length: <20> end = length <21> else: <22> # Try to find the end of the sentence <23> while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS: <24> if all_text[end] in WORDS_BREAKS: <25> last_word = end <26> end += 1 <27> if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0: <28> end = last_word # Fall back to at least keeping a whole word <29> if end < length: <30> end += 1 <31> <32> # Try to find the start of the sentence or at least a whole word boundary <33> last_word = -1 <34> while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all_text[start] not in SENTENCE_ENDINGS: <35> if all_text[start] in WORDS_BREAKS: <36> last_word =</s>
===========below chunk 0=========== # module: scripts.prepdocs def split_text(page_map): # offset: 1 start -= 1 if all_text[start] not in SENTENCE_ENDINGS and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield (section_text, find_page(start)) last_table_start = section_text.rfind("<table") if (last_table_start > 2 * SENTENCE_SEARCH_LIMIT and last_table_start > section_text.rfind("</table")): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside SENTENCE_SEARCH_LIMIT, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside SECTION_OVERLAP, keep overlapping if args.verbose: print(f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}") start = min(end - SECTION_OVERLAP, start + last_table_start) else: start = end - SECTION_OVERLAP if start + SECTION_OVERLAP < end: yield (all_text[start:end], find_page(start)) ===========unchanged ref 0=========== at: scripts.prepdocs MAX_SECTION_LENGTH = 1000 SENTENCE_SEARCH_LIMIT = 100 SECTION_OVERLAP = 100 args = parser.parse_args() ===========changed ref 0=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 1=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1]*page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >=0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing charcters in table spans with table html page_text = "" added_tables = 
set() </s> ===========changed ref 2=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> page text by replacing charcters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] + elif table_id not in added_tables: - elif not table_id in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map
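=========== illustrative sketch ===========
As a rough mental model for split_text, ignoring the sentence/word-boundary search and the unclosed-table handling, the core is a fixed window with a 100-character overlap:

MAX_SECTION_LENGTH = 1000
SECTION_OVERLAP = 100


def naive_split(all_text):
    # Deliberately simplified: the real function also snaps boundaries to sentence
    # and word breaks and keeps unclosed HTML tables together across sections.
    start = 0
    while start + SECTION_OVERLAP < len(all_text):
        end = min(start + MAX_SECTION_LENGTH, len(all_text))
        yield all_text[start:end]
        start = end - SECTION_OVERLAP


assert [len(s) for s in naive_split("x" * 2500)] == [1000, 1000, 700]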
scripts.prepdocs/before_retry_sleep
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<0>:<add> if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") <del> if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...")
# module: scripts.prepdocs def before_retry_sleep(retry_state): <0> if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") <1>
===========unchanged ref 0=========== at: scripts.prepdocs args = parser.parse_args() ===========changed ref 0=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 1=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1]*page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >=0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing charcters in table spans with table html page_text = "" added_tables = set() </s> ===========changed ref 2=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> page text by replacing charcters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] + elif table_id not in added_tables: - elif not table_id in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map ===========changed ref 3=========== # module: scripts.prepdocs def split_text(page_map): SENTENCE_ENDINGS = [".", "!", "?"] WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] if args.verbose: print(f"Splitting '{filename}' into sections") def find_page(offset): + num_pages = len(page_map) - l = len(page_map) + for i in range(num_pages - 1): - for i in range(l - 1): if offset >= page_map[i][1] and offset < page_map[i + 1][1]: return i + return num_pages - 1 - return l - 1 all_text = "".join(p[2] for p in page_map) length = 
len(all_text) start = 0 end = length while start + SECTION_OVERLAP < length: last_word = -1 end = start + MAX_SECTION_LENGTH if end > length: end = length else: # Try to find the end of the sentence while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS: if all_text[end] in WORDS_BREAKS: last_word = end end += 1 if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all_text[start] not in SENTENCE_ENDINGS: if all_text[start] in WORDS_BREAKS: last_</s> ===========changed ref 4=========== # module: scripts.prepdocs def split_text(page_map): # offset: 1 <s>] not in SENTENCE_ENDINGS: if all_text[start] in WORDS_BREAKS: last_word = start start -= 1 if all_text[start] not in SENTENCE_ENDINGS and last_word > 0: start = last_word if start > 0: start += 1 section_text = all_text[start:end] yield (section_text, find_page(start)) last_table_start = section_text.rfind("<table") if (last_table_start > 2 * SENTENCE_SEARCH_LIMIT and last_table_start > section_text.rfind("</table")): # If the section ends with an unclosed table, we need to start the next section with the table. # If table starts inside SENTENCE_SEARCH_LIMIT, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH # If last table starts inside SECTION_OVERLAP, keep overlapping if args.verbose: print(f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}") start = min(end - SECTION_OVERLAP, start + last_table_start) else: start = end - SECTION_OVERLAP + - if start + SECTION_OVERLAP < end: yield (all_text[start:end], find_page(start))
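=========== illustrative sketch ===========
before_retry_sleep is only meaningful together with the tenacity decorator shown in the compute_embedding diff above. A condensed sketch of the wiring; the only liberty taken is passing the deployment name as a parameter instead of reading args.openaideployment:

import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential


def before_retry_sleep(retry_state):
    # Called by tenacity before each backoff sleep, so verbose runs explain the pause.
    print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")


@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep)
def compute_embedding(text: str, deployment: str):
    return openai.Embedding.create(engine=deployment, input=text)["data"][0]["embedding"]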
scripts.prepdocs/remove_from_index
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<5>:<add> filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" <del> filter = None if filename == None else f"sourcefile eq '{os.path.basename(filename)}'"
# module: scripts.prepdocs def remove_from_index(filename): <0> if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") <1> search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/", <2> index_name=args.index, <3> credential=search_creds) <4> while True: <5> filter = None if filename == None else f"sourcefile eq '{os.path.basename(filename)}'" <6> r = search_client.search("", filter=filter, top=1000, include_total_count=True) <7> if r.get_count() == 0: <8> break <9> r = search_client.delete_documents(documents=[{ "id": d["id"] } for d in r]) <10> if args.verbose: print(f"\tRemoved {len(r)} sections from index") <11> # It can take a few seconds for search results to reflect changes, so wait a bit <12> time.sleep(2) <13>
===========unchanged ref 0=========== at: os.path basename(p: _PathLike[AnyStr]) -> AnyStr basename(p: AnyStr) -> AnyStr at: scripts.prepdocs args = parser.parse_args() search_creds = default_creds if args.searchkey is None else AzureKeyCredential(args.searchkey) at: time sleep(secs: float) -> None ===========changed ref 0=========== # module: scripts.prepdocs def before_retry_sleep(retry_state): + if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 1=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 2=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1]*page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >=0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing charcters in table spans with table html page_text = "" added_tables = set() </s> ===========changed ref 3=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> page text by replacing charcters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] + elif table_id not in added_tables: - elif not table_id in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map ===========changed ref 4=========== # 
module: scripts.prepdocs def split_text(page_map): SENTENCE_ENDINGS = [".", "!", "?"] WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"] if args.verbose: print(f"Splitting '{filename}' into sections") def find_page(offset): + num_pages = len(page_map) - l = len(page_map) + for i in range(num_pages - 1): - for i in range(l - 1): if offset >= page_map[i][1] and offset < page_map[i + 1][1]: return i + return num_pages - 1 - return l - 1 all_text = "".join(p[2] for p in page_map) length = len(all_text) start = 0 end = length while start + SECTION_OVERLAP < length: last_word = -1 end = start + MAX_SECTION_LENGTH if end > length: end = length else: # Try to find the end of the sentence while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS: if all_text[end] in WORDS_BREAKS: last_word = end end += 1 if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0: end = last_word # Fall back to at least keeping a whole word if end < length: end += 1 # Try to find the start of the sentence or at least a whole word boundary last_word = -1 while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all_text[start] not in SENTENCE_ENDINGS: if all_text[start] in WORDS_BREAKS: last_</s>
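The `filename == None` to `filename is None` edits in this record are the kind of finding ruff reports (E711 is the assumed rule code). A minimal, self-contained sketch of why identity comparison with None is preferred over equality comparison:

# Why "is None" is safer than "== None": __eq__ can be overridden, "is" cannot.
class AlwaysEqual:
    def __eq__(self, other):
        return True  # claims equality with everything, including None

value = AlwaysEqual()
print(value == None)  # True  -- misleading result from the overridden __eq__
print(value is None)  # False -- identity check gives the expected answer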
app.backend.langchainadapters/HtmlCallbackHandler.on_llm_start
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<1>:<add> self.html += "LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>" <del> self.html += f"LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>";
# module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): - def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: <0> """Print out the prompts.""" <1> self.html += f"LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>"; <2>
===========unchanged ref 0=========== at: app.backend.langchainadapters.HtmlCallbackHandler html: str = "" at: app.backend.langchainadapters.HtmlCallbackHandler.get_and_reset_log result = self.html at: app.backend.langchainadapters.HtmlCallbackHandler.on_agent_action self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_agent_finish self.html += f"<span style='color:{color}'>{ch(finish.log)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_end self.html += "Finished chain<br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_error self.html += f"<span style='color:red'>Chain error: {ch(error)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_start self.html += f"Entering chain: {ch(class_name)}<br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_llm_error self.html += f"<span style='color:red'>LLM error: {ch(error)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_llm_start self.html += "LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_text self.html += f"<span style='color:{color}'>{ch(text)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_tool_end self.html += f"{ch(observation_prefix)}<br><span style='color:{color}'>{ch(output)}</span><br>{ch(llm_prefix)}<br>" ===========unchanged ref 1=========== at: app.backend.langchainadapters.HtmlCallbackHandler.on_tool_error self.html += f"<span style='color:red'>Tool error: {ch(error)}</span><br>" at: typing List = _alias(list, 1, inst=False, name='List') Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: scripts.prepdocs def before_retry_sleep(retry_state): + if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 1=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 2=========== # module: scripts.prepdocs def remove_from_index(filename): if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds) while True: + filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'" - filter = None if filename == None else f"sourcefile eq '{os.path.basename(filename)}'" r = search_client.search("", filter=filter, top=1000, include_total_count=True) if r.get_count() == 0: break r = search_client.delete_documents(documents=[{ "id": d["id"] } for d in r]) if args.verbose: 
print(f"\tRemoved {len(r)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit time.sleep(2) ===========changed ref 3=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1]*page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >=0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing charcters in table spans with table html page_text = "" added_tables = set() </s> ===========changed ref 4=========== # module: scripts.prepdocs def get_document_text(filename): # offset: 1 <s> page text by replacing charcters in table spans with table html page_text = "" added_tables = set() for idx, table_id in enumerate(table_chars): if table_id == -1: page_text += form_recognizer_results.content[page_offset + idx] + elif table_id not in added_tables: - elif not table_id in added_tables: page_text += table_to_html(tables_on_page[table_id]) added_tables.add(table_id) page_text += " " page_map.append((page_num, offset, page_text)) offset += len(page_text) return page_map
app.backend.langchainadapters/HtmlCallbackHandler.on_chain_end
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<1>:<add> self.html += "Finished chain<br>" <del> self.html += f"Finished chain<br>"
# module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: <0> """Print out that we finished a chain.""" <1> self.html += f"Finished chain<br>" <2>
===========unchanged ref 0=========== at: app.backend.langchainadapters ch(text: Union[str, object]) -> str at: app.backend.langchainadapters.HtmlCallbackHandler.get_and_reset_log self.html = "" at: app.backend.langchainadapters.HtmlCallbackHandler.on_agent_action self.html += f"<span style='color:{color}'>{ch(action.log)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_agent_finish self.html += f"<span style='color:{color}'>{ch(finish.log)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_end self.html += "Finished chain<br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_error self.html += f"<span style='color:red'>Chain error: {ch(error)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_chain_start class_name = serialized["name"] at: app.backend.langchainadapters.HtmlCallbackHandler.on_llm_error self.html += f"<span style='color:red'>LLM error: {ch(error)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_llm_start self.html += "LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_text self.html += f"<span style='color:{color}'>{ch(text)}</span><br>" at: app.backend.langchainadapters.HtmlCallbackHandler.on_tool_end self.html += f"{ch(observation_prefix)}<br><span style='color:{color}'>{ch(output)}</span><br>{ch(llm_prefix)}<br>" ===========unchanged ref 1=========== at: app.backend.langchainadapters.HtmlCallbackHandler.on_tool_error self.html += f"<span style='color:red'>Tool error: {ch(error)}</span><br>" at: typing Dict = _alias(dict, 2, inst=False, name='Dict') ===========changed ref 0=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): - def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Print out the prompts.""" + self.html += "LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>" - self.html += f"LLM prompts:<br>" + "<br>".join(ch(prompts)) + "<br>"; ===========changed ref 1=========== # module: scripts.prepdocs def before_retry_sleep(retry_state): + if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...") ===========changed ref 2=========== # module: scripts.prepdocs def remove_blobs(filename): if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'") blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds) blob_container = blob_service.get_container_client(args.container) if blob_container.exists(): + if filename is None: - if filename == None: blobs = blob_container.list_blob_names() else: prefix = os.path.splitext(os.path.basename(filename))[0] blobs = filter(lambda b: re.match(f"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0])) for b in blobs: if args.verbose: print(f"\tRemoving blob {b}") blob_container.delete_blob(b) ===========changed ref 3=========== # module: scripts.prepdocs def remove_from_index(filename): if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'") search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds) while True: + filter = None if filename is None else f"sourcefile eq 
'{os.path.basename(filename)}'" - filter = None if filename == None else f"sourcefile eq '{os.path.basename(filename)}'" r = search_client.search("", filter=filter, top=1000, include_total_count=True) if r.get_count() == 0: break r = search_client.delete_documents(documents=[{ "id": d["id"] } for d in r]) if args.verbose: print(f"\tRemoved {len(r)} sections from index") # It can take a few seconds for search results to reflect changes, so wait a bit time.sleep(2) ===========changed ref 4=========== # module: scripts.prepdocs def get_document_text(filename): offset = 0 page_map = [] if args.localpdfparser: reader = PdfReader(filename) pages = reader.pages for page_num, p in enumerate(pages): page_text = p.extract_text() page_map.append((page_num, offset, page_text)) offset += len(page_text) else: if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer") form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"}) with open(filename, "rb") as f: poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f) form_recognizer_results = poller.result() for page_num, page in enumerate(form_recognizer_results.pages): tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1] # mark all positions of the table spans in the page page_offset = page.spans[0].offset page_length = page.spans[0].length table_chars = [-1]*page_length for table_id, table in enumerate(tables_on_page): for span in table.spans: # replace all table spans with "table_id" in table_chars array for i in range(span.length): idx = span.offset - page_offset + i if idx >=0 and idx < page_length: table_chars[idx] = table_id # build page text by replacing charcters in table spans with table html page_text = "" added_tables = set() </s>
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
07a97f3a74b75e6e938cec471fabd1fa7a9111f1
Run ruff, add to precommit (#491)
<18>:<add> r = self.search_client.search(query_text, <del> r = self.search_client.search(query_text, <20>:<add> query_type=QueryType.SEMANTIC, <del> query_type=QueryType.SEMANTIC, <21>:<add> query_language="en-us", <del> query_language="en-us", <22>:<add> query_speller="lexicon", <del> query_speller="lexicon", <23>:<add> semantic_configuration_name="default", <del> semantic_configuration_name="default", <24>:<add> top=top, <del> top=top, <26>:<add> vector=query_vector, <del> vector=query_vector, <27>:<add> top_k=50 if query_vector else None, <del> top_k=50 if query_vector else None,
# module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=q)["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> query_text = q if has_text else None <15> <16> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <17> if overrides.get("semantic_ranker") and has_text: <18> r = self.search_client.search(query_text, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26> vector=query_vector, <27> top_k=50 if query_vector else None, <28> vector_fields="embedding" if query_vector else None) <29> else: </s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) message_builder = MessageBuilder(overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model); # add user question user_content = q + "\n" + "Sources:\n {content}".format(content=content) message_builder.append_message('user', user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message('assistant', self.answer) message_builder.append_message('user', self.question) messages = message_builder.messages chat_completion = openai.ChatCompletion.create( deployment_id=self.openai_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1) return {"data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + '\n\</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 2 <s>, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + '\n\n'.join([str(message) for message in messages])} ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment at: approaches.approach.Approach run(self, q: str, overrides: dict[str, Any]) -> Any at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" create(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str ===========unchanged ref 1=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): """ 
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion (answer) with that prompt. """ system_chat_template = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " + \ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + \ "Answer the following question using only the data provided in the sources below. " + \ "For tabular information return it as an html table. Do not return markdown format. " + \ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. " + \ "If you cannot answer using the sources below, say you don't know. Use below example to answer" #shots/sample conversation question = """ + 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' - 'What is the deductible for the employee plan for a visit to Overlake in Bellevue?' Sources: info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family. info2.pdf: Overlake is in-network for the employee plan. info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue. info4.pdf: In-network institutions include Overlake, Swedish and others in the region """ answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan</s> ===========changed ref 1=========== # module: app.backend.approaches.retrievethenread class RetrieveThenReadApproach(Approach): # offset: 1 <s> are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]." ===========changed ref 2=========== # module: app.backend.langchainadapters class HtmlCallbackHandler (BaseCallbackHandler): def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" + self.html += "Finished chain<br>" - self.html += f"Finished chain<br>" ===========changed ref 3=========== # module: scripts.prepdocs def before_retry_sleep(retry_state): + if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...") - if args.verbose: print(f"Rate limited on the OpenAI embeddings API, sleeping before retrying...")
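The run() method in this record interleaves a canned question/answer shot with the real question plus retrieved sources. A stand-alone sketch of that message layout (ordering and strings are illustrative, not the project's MessageBuilder):

def build_messages(system, shot_question, shot_answer, question, sources):
    # Final order seen by the chat model: system, example Q, example A, real question.
    user_content = question + "\nSources:\n" + "\n".join(sources)
    return [
        {"role": "system", "content": system},
        {"role": "user", "content": shot_question},
        {"role": "assistant", "content": shot_answer},
        {"role": "user", "content": user_content},
    ]

messages = build_messages(
    "Answer using only the provided sources.",
    "What is the deductible?\nSources:\ninfo1.txt: the deductible is $500",
    "The deductible is $500 [info1.txt].",
    "Is Overlake in-network?",
    ["info2.pdf: Overlake is in-network for the employee plan."],
)
for m in messages:
    print(m["role"], "->", m["content"].splitlines()[0])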
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
df48d8c23b4d0ae0ec6487cecd715a9db2943f64
Use an app factory pattern to enable app testing (#495)
<0>:<add> blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER)
# module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. + @bp.route("/content/<path>") - @app.route("/content/<path>") def content_file(path): <0> blob = blob_container.get_blob_client(path).download_blob() <1> if not blob.properties or not blob.properties.has_key("content_settings"): <2> abort(404) <3> mime_type = blob.properties["content_settings"]["content_type"] <4> if mime_type == "application/octet-stream": <5> mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" <6> blob_file = io.BytesIO() <7> blob.readinto(blob_file) <8> blob_file.seek(0) <9> return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) <10>
===========unchanged ref 0=========== at: app.backend.app CONFIG_ASK_APPROACHES = "ask_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app - @app.route("/", defaults={"path": "index.html"}) - @app.route("/<path:path>") - def static_file(path): - return app.send_static_file(path) - ===========changed ref 1=========== # module: app.backend.app + @bp.route("/assets/<path:path>") + def assets(path): + return send_from_directory("static/assets", path) + ===========changed ref 2=========== # module: app.backend.app + @bp.route("/favicon.ico") + def favicon(): + return bp.send_static_file("favicon.ico") + ===========changed ref 3=========== # module: app.backend.app + @bp.route("/") + def index(): + return bp.send_static_file("index.html") + ===========changed ref 4=========== + # module: tests.test_app + + ===========changed ref 5=========== + # module: tests.conftest + + ===========changed ref 6=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def __init__(self): + pass + ===========changed ref 7=========== + # module: tests.conftest + @pytest.fixture() + def client(app): + return app.test_client() + ===========changed ref 8=========== + # module: tests.conftest + @pytest.fixture() + def runner(app): + return app.test_cli_runner() + ===========changed ref 9=========== + # module: tests.conftest + class MockAzureCredential: + def get_token(self, uri): + return MockToken("mock_token", 9999999999) + ===========changed ref 10=========== + # module: tests.conftest + MockToken = namedtuple("MockToken", ["token", "expires_on"]) + ===========changed ref 11=========== + # module: tests.test_app + def test_index(client): + response = client.get("/") + assert response.status_code == 200 + ===========changed ref 12=========== + # module: tests.conftest + class MockedAskApproach(Approach): + def run(self, question, overrides): + assert question == "What is the capital of France?" 
+ return {"answer": "Paris"} + ===========changed ref 13=========== + # module: tests.test_app + def test_chat_with_unknown_approach(client): + response = client.post("/chat", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 14=========== + # module: tests.test_app + def test_ask_with_unknown_approach(client): + response = client.post("/ask", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 15=========== + # module: tests.test_app + def test_chat_request_must_be_json(client): + response = client.post("/chat") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 16=========== + # module: tests.test_app + def test_ask_request_must_be_json(client): + response = client.post("/ask") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 17=========== + # module: tests.test_app + def test_ask_mock_approach(client): + response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 18=========== + # module: tests.test_app + def test_chat_mock_approach(client): + response = client.post( + "/chat", + json={ + "approach": "mock", + "history": [{"user": "What is the capital of France?"}], + }, + ) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 19=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 threads = 1 if num_cpus == 1 else 2 timeout = 600 + worker_class = "gthread" ===========changed ref 20=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def run(self, history, overrides): + messages = ChatReadRetrieveReadApproach.get_messages_from_history(self, ChatReadRetrieveReadApproach.query_prompt_template, "gpt-3.5-turbo", history, "Generate search query") + assert messages[0]["role"] == "system" + assert messages[1]["content"] == "Generate search query" + assert messages[1]["role"] == "user" + return {"answer": "Paris", "data_points": [], "thoughts": ""} + ===========changed ref 21=========== + # module: tests.conftest + @pytest.fixture() + def app(): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + _app = backend_app.create_app() + _app.config.update( + { + "TESTING": True, + backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, + backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, + } + ) + + yield _app +
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
df48d8c23b4d0ae0ec6487cecd715a9db2943f64
Use an app factory pattern to enable app testing (#495)
<0>:<add> if not request.is_json: <del> if not request.json: <1>:<add> return jsonify({"error": "request must be json"}), 415 <del> return jsonify({"error": "request must be json"}), 400 <4>:<add> impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) <del> impl = ask_approaches.get(approach)
# module: app.backend.app + @bp.route("/ask", methods=["POST"]) - @app.route("/ask", methods=["POST"]) def ask(): <0> if not request.json: <1> return jsonify({"error": "request must be json"}), 400 <2> approach = request.json["approach"] <3> try: <4> impl = ask_approaches.get(approach) <5> if not impl: <6> return jsonify({"error": "unknown approach"}), 400 <7> r = impl.run(request.json["question"], request.json.get("overrides") or {}) <8> return jsonify(r) <9> except Exception as e: <10> logging.exception("Exception in /ask") <11> return jsonify({"error": str(e)}), 500 <12>
===========unchanged ref 0=========== at: app.backend.app CONFIG_CHAT_APPROACHES = "chat_approaches" bp = Blueprint("routes", __name__, static_folder='static') at: logging exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None ===========changed ref 0=========== # module: app.backend.app - @app.route("/", defaults={"path": "index.html"}) - @app.route("/<path:path>") - def static_file(path): - return app.send_static_file(path) - ===========changed ref 1=========== # module: app.backend.app + @bp.route("/assets/<path:path>") + def assets(path): + return send_from_directory("static/assets", path) + ===========changed ref 2=========== # module: app.backend.app + @bp.route("/favicon.ico") + def favicon(): + return bp.send_static_file("favicon.ico") + ===========changed ref 3=========== # module: app.backend.app + @bp.route("/") + def index(): + return bp.send_static_file("index.html") + ===========changed ref 4=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. + @bp.route("/content/<path>") - @app.route("/content/<path>") def content_file(path): + blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) blob = blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() blob.readinto(blob_file) blob_file.seek(0) return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) ===========changed ref 5=========== + # module: tests.test_app + + ===========changed ref 6=========== + # module: tests.conftest + + ===========changed ref 7=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def __init__(self): + pass + ===========changed ref 8=========== + # module: tests.conftest + @pytest.fixture() + def client(app): + return app.test_client() + ===========changed ref 9=========== + # module: tests.conftest + @pytest.fixture() + def runner(app): + return app.test_cli_runner() + ===========changed ref 10=========== + # module: tests.conftest + class MockAzureCredential: + def get_token(self, uri): + return MockToken("mock_token", 9999999999) + ===========changed ref 11=========== + # module: tests.conftest + MockToken = namedtuple("MockToken", ["token", "expires_on"]) + ===========changed ref 12=========== + # module: tests.test_app + def test_index(client): + response = client.get("/") + assert response.status_code == 200 + ===========changed ref 13=========== + # module: tests.conftest + class MockedAskApproach(Approach): + def run(self, question, overrides): + assert question == "What is the capital of France?" 
+ return {"answer": "Paris"} + ===========changed ref 14=========== + # module: tests.test_app + def test_chat_with_unknown_approach(client): + response = client.post("/chat", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 15=========== + # module: tests.test_app + def test_ask_with_unknown_approach(client): + response = client.post("/ask", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 16=========== + # module: tests.test_app + def test_chat_request_must_be_json(client): + response = client.post("/chat") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 17=========== + # module: tests.test_app + def test_ask_request_must_be_json(client): + response = client.post("/ask") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 18=========== + # module: tests.test_app + def test_ask_mock_approach(client): + response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 19=========== + # module: tests.test_app + def test_chat_mock_approach(client): + response = client.post( + "/chat", + json={ + "approach": "mock", + "history": [{"user": "What is the capital of France?"}], + }, + ) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 20=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 threads = 1 if num_cpus == 1 else 2 timeout = 600 + worker_class = "gthread" ===========changed ref 21=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def run(self, history, overrides): + messages = ChatReadRetrieveReadApproach.get_messages_from_history(self, ChatReadRetrieveReadApproach.query_prompt_template, "gpt-3.5-turbo", history, "Generate search query") + assert messages[0]["role"] == "system" + assert messages[1]["content"] == "Generate search query" + assert messages[1]["role"] == "user" + return {"answer": "Paris", "data_points": [], "thoughts": ""} + ===========changed ref 22=========== + # module: tests.conftest + @pytest.fixture() + def app(): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + _app = backend_app.create_app() + _app.config.update( + { + "TESTING": True, + backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, + backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, + } + ) + + yield _app +
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
df48d8c23b4d0ae0ec6487cecd715a9db2943f64
Use an app factory pattern to enable app testing (#495)
<0>:<add> if not request.is_json: <del> if not request.json: <1>:<add> return jsonify({"error": "request must be json"}), 415 <del> return jsonify({"error": "request must be json"}), 400 <4>:<add> impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) <del> impl = chat_approaches.get(approach)
# module: app.backend.app + @bp.route("/chat", methods=["POST"]) - @app.route("/chat", methods=["POST"]) def chat(): <0> if not request.json: <1> return jsonify({"error": "request must be json"}), 400 <2> approach = request.json["approach"] <3> try: <4> impl = chat_approaches.get(approach) <5> if not impl: <6> return jsonify({"error": "unknown approach"}), 400 <7> r = impl.run(request.json["history"], request.json.get("overrides") or {}) <8> return jsonify(r) <9> except Exception as e: <10> logging.exception("Exception in /chat") <11> return jsonify({"error": str(e)}), 500 <12>
===========unchanged ref 0=========== at: app.backend.app CONFIG_OPENAI_TOKEN = "openai_token" CONFIG_CREDENTIAL = "azure_credential" at: openai api_key = os.environ.get("OPENAI_API_KEY") at: time time() -> float ===========changed ref 0=========== # module: app.backend.app - @app.route("/", defaults={"path": "index.html"}) - @app.route("/<path:path>") - def static_file(path): - return app.send_static_file(path) - ===========changed ref 1=========== # module: app.backend.app + @bp.route("/assets/<path:path>") + def assets(path): + return send_from_directory("static/assets", path) + ===========changed ref 2=========== # module: app.backend.app + @bp.route("/favicon.ico") + def favicon(): + return bp.send_static_file("favicon.ico") + ===========changed ref 3=========== # module: app.backend.app + @bp.route("/") + def index(): + return bp.send_static_file("index.html") + ===========changed ref 4=========== # module: app.backend.app + @bp.route("/ask", methods=["POST"]) - @app.route("/ask", methods=["POST"]) def ask(): + if not request.is_json: - if not request.json: + return jsonify({"error": "request must be json"}), 415 - return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: + impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) - impl = ask_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 5=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. 
+ @bp.route("/content/<path>") - @app.route("/content/<path>") def content_file(path): + blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) blob = blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() blob.readinto(blob_file) blob_file.seek(0) return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) ===========changed ref 6=========== + # module: tests.test_app + + ===========changed ref 7=========== + # module: tests.conftest + + ===========changed ref 8=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def __init__(self): + pass + ===========changed ref 9=========== + # module: tests.conftest + @pytest.fixture() + def client(app): + return app.test_client() + ===========changed ref 10=========== + # module: tests.conftest + @pytest.fixture() + def runner(app): + return app.test_cli_runner() + ===========changed ref 11=========== + # module: tests.conftest + class MockAzureCredential: + def get_token(self, uri): + return MockToken("mock_token", 9999999999) + ===========changed ref 12=========== + # module: tests.conftest + MockToken = namedtuple("MockToken", ["token", "expires_on"]) + ===========changed ref 13=========== + # module: tests.test_app + def test_index(client): + response = client.get("/") + assert response.status_code == 200 + ===========changed ref 14=========== + # module: tests.conftest + class MockedAskApproach(Approach): + def run(self, question, overrides): + assert question == "What is the capital of France?" 
+ return {"answer": "Paris"} + ===========changed ref 15=========== + # module: tests.test_app + def test_chat_with_unknown_approach(client): + response = client.post("/chat", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 16=========== + # module: tests.test_app + def test_ask_with_unknown_approach(client): + response = client.post("/ask", json={"approach": "test"}) + assert response.status_code == 400 + ===========changed ref 17=========== + # module: tests.test_app + def test_chat_request_must_be_json(client): + response = client.post("/chat") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 18=========== + # module: tests.test_app + def test_ask_request_must_be_json(client): + response = client.post("/ask") + assert response.status_code == 415 + assert response.json["error"] == "request must be json" + ===========changed ref 19=========== + # module: tests.test_app + def test_ask_mock_approach(client): + response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 20=========== + # module: tests.test_app + def test_chat_mock_approach(client): + response = client.post( + "/chat", + json={ + "approach": "mock", + "history": [{"user": "What is the capital of France?"}], + }, + ) + assert response.status_code == 200 + assert response.json["answer"] == "Paris" + ===========changed ref 21=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 threads = 1 if num_cpus == 1 else 2 timeout = 600 + worker_class = "gthread" ===========changed ref 22=========== + # module: tests.conftest + class MockedChatApproach(ChatReadRetrieveReadApproach): + def run(self, history, overrides): + messages = ChatReadRetrieveReadApproach.get_messages_from_history(self, ChatReadRetrieveReadApproach.query_prompt_template, "gpt-3.5-turbo", history, "Generate search query") + assert messages[0]["role"] == "system" + assert messages[1]["content"] == "Generate search query" + assert messages[1]["role"] == "user" + return {"answer": "Paris", "data_points": [], "thoughts": ""} +
app.backend.app/ensure_openai_token
Modified
Azure-Samples~azure-search-openai-demo
df48d8c23b4d0ae0ec6487cecd715a9db2943f64
Use an app factory pattern to enable app testing (#495)
<0>:<add> openai_token = current_app.config[CONFIG_OPENAI_TOKEN] <del> global openai_token <2>:<add> openai_token = current_app.config[CONFIG_CREDENTIAL].get_token("https://cognitiveservices.azure.com/.default") <del> openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default") <3>:<add> current_app.config[CONFIG_OPENAI_TOKEN] = openai_token
# module: app.backend.app + @bp.before_request - @app.before_request def ensure_openai_token(): <0> global openai_token <1> if openai_token.expires_on < time.time() + 60: <2> openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default") <3> openai.api_key = openai_token.token <4>
===========unchanged ref 0=========== at: app.backend.app AZURE_SEARCH_SERVICE = os.getenv("AZURE_SEARCH_SERVICE", "gptkb") AZURE_SEARCH_INDEX = os.getenv("AZURE_SEARCH_INDEX", "gptkbindex") at: app.backend.app.create_app azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) ===========changed ref 0=========== # module: app.backend.app - @app.route("/", defaults={"path": "index.html"}) - @app.route("/<path:path>") - def static_file(path): - return app.send_static_file(path) - ===========changed ref 1=========== # module: app.backend.app + @bp.route("/assets/<path:path>") + def assets(path): + return send_from_directory("static/assets", path) + ===========changed ref 2=========== # module: app.backend.app + @bp.route("/favicon.ico") + def favicon(): + return bp.send_static_file("favicon.ico") + ===========changed ref 3=========== # module: app.backend.app + @bp.route("/") + def index(): + return bp.send_static_file("index.html") + ===========changed ref 4=========== # module: app.backend.app + @bp.route("/chat", methods=["POST"]) - @app.route("/chat", methods=["POST"]) def chat(): + if not request.is_json: - if not request.json: + return jsonify({"error": "request must be json"}), 415 - return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: + impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach) - impl = chat_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["history"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /chat") return jsonify({"error": str(e)}), 500 ===========changed ref 5=========== # module: app.backend.app + @bp.route("/ask", methods=["POST"]) - @app.route("/ask", methods=["POST"]) def ask(): + if not request.is_json: - if not request.json: + return jsonify({"error": "request must be json"}), 415 - return jsonify({"error": "request must be json"}), 400 approach = request.json["approach"] try: + impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach) - impl = ask_approaches.get(approach) if not impl: return jsonify({"error": "unknown approach"}), 400 r = impl.run(request.json["question"], request.json.get("overrides") or {}) return jsonify(r) except Exception as e: logging.exception("Exception in /ask") return jsonify({"error": str(e)}), 500 ===========changed ref 6=========== # module: app.backend.app # Serve content files from blob storage from within the app to keep the example self-contained. # *** NOTE *** this assumes that the content files are public, or at least that all users of the app # can access all the files. This is also slow and memory hungry. 
+ @bp.route("/content/<path>") - @app.route("/content/<path>") def content_file(path): + blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER) blob = blob_container.get_blob_client(path).download_blob() if not blob.properties or not blob.properties.has_key("content_settings"): abort(404) mime_type = blob.properties["content_settings"]["content_type"] if mime_type == "application/octet-stream": mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream" blob_file = io.BytesIO() blob.readinto(blob_file) blob_file.seek(0) return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path) ===========changed ref 7=========== # module: app.backend.app + def create_app(): + app = Flask(__name__) + + # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, + # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the + # keys for each service + # If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True) + azure_credential = DefaultAzureCredential(exclude_shared_token_cache_credential = True) + + # Set up clients for Cognitive Search and Storage + search_client = SearchClient( + endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", + index_name=AZURE_SEARCH_INDEX, + credential=azure_credential) + blob_client = BlobServiceClient( + account_url=f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", + credential=azure_credential) + + # Used by the OpenAI SDK + openai.api_type = "azure" + openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com" + openai.api_version = "2023-05-15" + + # Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead + openai.api_type = "azure_ad" + openai_token = azure_credential.get_token( + "https://cognitiveservices.azure.com/.default" + ) + openai.api_key = openai_token.token + + # Store on app.config for later use inside requests + app.config[CONFIG_OPENAI_TOKEN] = openai_token + app.config[CONFIG_CREDENTIAL] = azure_credential</s>
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.retrieve
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<9>:<add> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] <del> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <15>:<add> query_text = "" <del> query_text = None <19>:<add> r = await self.search_client.search(query_text, <del> r = self.search_client.search(query_text,
# module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> if not has_text: <15> query_text = None <16> <17> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <18> if overrides.get("semantic_ranker") and has_text: <19> r = self.search_client.search(query_text, <20> filter=filter, <21> query_type=QueryType.SEMANTIC, <22> query_language="en-us", <23> query_speller="lexicon", <24> semantic_configuration_name="default", <25> top = top, <26> query_caption="extractive|highlight-false" if use_semantic_captions else None, <27> vector=query_vector, <28> </s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 vector_fields="embedding" if query_vector else None) else: r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r] else: self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for doc in r] content = "\n".join(self.results) return content ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach template_prefix = \ "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \ "Answer the question using only the data provided in the information sources below. " \ "For tabular information return it as an html table. Do not return markdown format. " \ "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \ "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \ "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \ "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \ "Never quote tool names as sources." \ "If you cannot answer using the sources below, say that you don't know. " \ "\n\nYou can access to the following tools:" template_suffix = """ Begin! Question: {input} Thought: {agent_scratchpad}""" CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc." at: app.backend.approaches.readretrieveread.ReadRetrieveReadApproach.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment ===========unchanged ref 1=========== at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text nonewlines(s: str) -> str at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: app.backend.approaches + +
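The Quart port changes retrieve() from blocking calls to awaited ones and from a plain comprehension to an async comprehension over the search results. A self-contained asyncio sketch of that shape (fake_embed and fake_search stand in for the OpenAI and Search SDK calls):

import asyncio

async def fake_embed(text):
    await asyncio.sleep(0)               # stands in for an awaited embeddings call
    return [0.1, 0.2, 0.3]

async def fake_search(query):
    for doc in ({"sourcepage": "info1.txt", "content": "doc about " + query},):
        yield doc                        # async generator, like async search results

async def retrieve(query):
    vector = await fake_embed(query)
    results = [d["sourcepage"] + ": " + d["content"] async for d in fake_search(query)]
    return vector, results

print(asyncio.run(retrieve("benefits")))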
app.backend.approaches.readretrieveread/ReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<del> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1>:<add> retrieve_results = None <del> self.results = None <2>:<add> async def retrieve_and_store(q: str) -> Any: <add> nonlocal retrieve_results <add> retrieve_results, content = await self.retrieve(q, overrides) <add> return content <8>:<add> func=lambda _: 'Not implemented', <add> coroutine=retrieve_and_store, <del> func=lambda q: self.retrieve(q, overrides), <22>:<add> agent = ZeroShotAgent(llm_chain = chain), <del> agent = ZeroShotAgent(llm_chain = chain, tools = tools), <26>:<add> result = await agent_exec.arun(q) <del> result = agent_exec.run(q)
# module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple <1> self.results = None <2> <3> # Use to capture thought process during iterations <4> cb_handler = HtmlCallbackHandler() <5> cb_manager = CallbackManager(handlers=[cb_handler]) <6> <7> acs_tool = Tool(name="CognitiveSearch", <8> func=lambda q: self.retrieve(q, overrides), <9> description=self.CognitiveSearchToolDescription, <10> callbacks=cb_manager) <11> employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) <12> tools = [acs_tool, employee_tool] <13> <14> prompt = ZeroShotAgent.create_prompt( <15> tools=tools, <16> prefix=overrides.get("prompt_template_prefix") or self.template_prefix, <17> suffix=overrides.get("prompt_template_suffix") or self.template_suffix, <18> input_variables = ["input", "agent_scratchpad"]) <19> llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) <20> chain = LLMChain(llm = llm, prompt = prompt) <21> agent_exec = AgentExecutor.from_agent_and_tools( <22> agent = ZeroShotAgent(llm_chain = chain, tools = tools), <23> tools = tools, <24> verbose = True, <25> callback_manager = cb_manager) <26> result = agent_exec.run(q) <27> <28> # Remove references to tool names that might be confused with</s>
===========below chunk 0=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========unchanged ref 0=========== at: app.backend.approaches.readretrieveread EmployeeInfoTool(employee_name: str, callbacks: Callbacks=None) at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> Any at: openai api_key = os.environ.get("OPENAI_API_KEY") ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", </s> ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s>language="en-us", query_speller="lexicon", semantic_configuration_name="default", top = top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: + results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r] else: + 
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for</s> ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 2 <s> r] + content = "\n".join(results) - content = "\n".join(self.results) + return results, content - return content ===========changed ref 3=========== + # module: app.backend.approaches + +
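The ground-truth diff above replaces the `self.results` instance attribute with a `nonlocal` variable captured by an async closure, so the search tool can hand its text back to the agent while the retrieved documents stay local to the request instead of being shared mutable state. A minimal, self-contained sketch of that pattern, assuming nothing beyond the standard library; the names `fake_search` and `run` are illustrative stand-ins, not code from the repo:

import asyncio
from typing import Any, Optional

async def fake_search(query: str) -> tuple[list[str], str]:
    # Stand-in for ReadRetrieveReadApproach.retrieve(): returns (results, joined content).
    results = [f"doc1: about {query}", f"doc2: about {query}"]
    return results, "\n".join(results)

async def run(query: str) -> dict[str, Any]:
    retrieve_results: Optional[list[str]] = None

    async def retrieve_and_store(q: str) -> str:
        # Capture the side output in the enclosing scope instead of on `self`,
        # so interleaved async requests each keep their own results.
        nonlocal retrieve_results
        retrieve_results, content = await fake_search(q)
        return content

    answer = await retrieve_and_store(query)  # an agent would invoke this as its tool coroutine
    return {"data_points": retrieve_results or [], "answer": answer}

if __name__ == "__main__":
    print(asyncio.run(run("employee benefits")))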
app.backend.approaches.readretrieveread/EmployeeInfoTool.__init__
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<5>:<add> self.func = lambda _: 'Not implemented' <add> self.coroutine = self.employee_info <del> self.func = self.employee_info
# module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): <0> super().__init__(filename="data/employeeinfo.csv", <1> key_field="name", <2> name="Employee", <3> description="useful for answering questions about the employee, their benefits and other personal information", <4> callbacks=callbacks) <5> self.func = self.employee_info <6> self.employee_name = employee_name <7>
===========unchanged ref 0=========== at: lookuptool.CsvLookupTool data: dict[str, str] = {} __init__(filename: Union[str, Path], key_field: str, name: str="lookup", description: str="useful to look up details given an input key as opposite to searching data with an unstructured question", callbacks: Callbacks=None) __init__(self, filename: Union[str, Path], key_field: str, name: str="lookup", description: str="useful to look up details given an input key as opposite to searching data with an unstructured question", callbacks: Callbacks=None) ===========changed ref 0=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in 
["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", </s> ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s>language="en-us", query_speller="lexicon", semantic_configuration_name="default", top = top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: + results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r] else: + results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r] - self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for</s>
app.backend.core.modelhelper/get_token_limit
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<2>:<add> return MODELS_2_TOKEN_LIMITS[model_id] <del> return MODELS_2_TOKEN_LIMITS.get(model_id)
# module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: <0> if model_id not in MODELS_2_TOKEN_LIMITS: <1> raise ValueError("Expected model gpt-35-turbo and above") <2> return MODELS_2_TOKEN_LIMITS.get(model_id) <3>
===========unchanged ref 0=========== at: app.backend.core.modelhelper MODELS_2_TOKEN_LIMITS = { "gpt-35-turbo": 4000, "gpt-3.5-turbo": 4000, "gpt-35-turbo-16k": 16000, "gpt-3.5-turbo-16k": 16000, "gpt-4": 8100, "gpt-4-32k": 32000 } ===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 2=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 3=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========changed ref 4=========== # module: app.backend.approaches.readretrieveread + class 
ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", </s>
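The `get_token_limit` diff above swaps `.get()` for plain indexing: `.get()` returns `Optional[int]`, which contradicts the declared `-> int` return type, while direct indexing is safe because membership was checked on the previous line. A trimmed illustration of the same shape; the `LIMITS` dict below is a small stand-in for `MODELS_2_TOKEN_LIMITS`, not the full mapping:

LIMITS = {"gpt-35-turbo": 4000, "gpt-4": 8100}

def get_token_limit(model_id: str) -> int:
    if model_id not in LIMITS:
        raise ValueError("Expected model gpt-35-turbo and above")
    return LIMITS[model_id]  # safe: the key is known to exist at this point

assert get_token_limit("gpt-4") == 8100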
tests.conftest/client
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> # mock the DefaultAzureCredential <add> with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: <add> mock_default_azure_credential.return_value = MockAzureCredential() <add> quart_app = app.create_app() <del> return app.test_client()
# module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): <0> return app.test_client() <1>
===========changed ref 0=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 1=========== + # module: app.backend.approaches + + ===========changed ref 2=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 5=========== + # module: app.backend.main + app = create_app() + ===========changed ref 6=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 7=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 8=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. 
if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 9=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} ===========changed ref 10=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: - def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any: has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False top = overrides.get("top") or 3 exclude_category = overrides.get("exclude_category") or None filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None # If retrieval mode includes vectors, compute an embedding for the query if has_vector: + query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"] - query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: + query_text = "" - query_text = None # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) if 
overrides.get("semantic_ranker") and has_text: + r = await self.search_client.search(query_text, - r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", </s>
tests.test_app/test_index
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.get("/") <del> response = client.get("/")
# module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): <0> response = client.get("/") <1> assert response.status_code == 200 <2>
===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 2=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 4=========== + # module: app.backend.main + app = create_app() + ===========changed ref 5=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 6=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 7=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 8=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 9=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 10=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. 
if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 11=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
tests.test_app/test_ask_request_must_be_json
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post("/ask") <del> response = client.post("/ask") <2>:<add> result = await response.get_json() <add> assert result["error"] == "request must be json" <del> assert response.json["error"] == "request must be json"
# module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): <0> response = client.post("/ask") <1> assert response.status_code == 415 <2> assert response.json["error"] == "request must be json" <3>
===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 1=========== + # module: app.backend.approaches + + ===========changed ref 2=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 5=========== + # module: app.backend.main + app = create_app() + ===========changed ref 6=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 7=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 8=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 9=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 10=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 11=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. 
if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 12=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
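A note on the Flask-to-Quart test changes in the surrounding records: with Quart's test client both the request call and the JSON body are coroutines, so Flask's `response.json` property becomes `await response.get_json()`. A small self-contained sketch against a hypothetical route (not the repo's real /ask handler, which delegates to an approach object):

import asyncio
from quart import Quart, jsonify

demo_app = Quart(__name__)

@demo_app.route("/ask", methods=["POST"])
async def ask():
    # Hypothetical handler that always rejects non-JSON requests.
    return jsonify({"error": "request must be json"}), 415

async def main():
    client = demo_app.test_client()
    response = await client.post("/ask")   # the request itself is awaited
    result = await response.get_json()     # ...and so is reading the JSON body
    assert response.status_code == 415
    assert result["error"] == "request must be json"

if __name__ == "__main__":
    asyncio.run(main())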
tests.test_app/test_ask_with_unknown_approach
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post("/ask", json={"approach": "test"}) <del> response = client.post("/ask", json={"approach": "test"})
# module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): <0> response = client.post("/ask", json={"approach": "test"}) <1> assert response.status_code == 400 <2>
===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 2=========== + # module: app.backend.approaches + + ===========changed ref 3=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 5=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 6=========== + # module: app.backend.main + app = create_app() + ===========changed ref 7=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 8=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 9=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 10=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 11=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 12=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: 
dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 13=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
tests.test_app/test_ask_mock_approach
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) <del> response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) <2>:<add> result = await response.get_json() <add> assert result["answer"] == "Paris" <del> assert response.json["answer"] == "Paris"
# module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): <0> response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) <1> assert response.status_code == 200 <2> assert response.json["answer"] == "Paris" <3>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 3=========== + # module: app.backend.approaches + + ===========changed ref 4=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 5=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 6=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 7=========== + # module: app.backend.main + app = create_app() + ===========changed ref 8=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 9=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 10=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 11=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 12=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - 
_app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 13=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s> ===========changed ref 14=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 <s> agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain = chain), - agent = ZeroShotAgent(llm_chain = chain, tools = tools), tools = tools, verbose = True, callback_manager = cb_manager) + result = await agent_exec.arun(q) - result = agent_exec.run(q) # Remove references to tool names that might be confused with a citation result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "") + return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()} - return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
tests.test_app/test_chat_request_must_be_json
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post("/chat") <del> response = client.post("/chat") <2>:<add> result = await response.get_json() <add> assert result["error"] == "request must be json" <del> assert response.json["error"] == "request must be json"
# module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): <0> response = client.post("/chat") <1> assert response.status_code == 415 <2> assert response.json["error"] == "request must be json" <3>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 4=========== + # module: app.backend.approaches + + ===========changed ref 5=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 6=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... 
+ ===========changed ref 7=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 8=========== + # module: app.backend.main + app = create_app() + ===========changed ref 9=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 10=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 11=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 12=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 13=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 14=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. 
if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s>
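The refs above drop the old synchronous Approach base class in favour of two ABCs whose run methods are coroutines. A minimal sketch of what a concrete subclass looks like against that contract; the AskApproach signature is copied from the ref, while the class name and return payload below are made up for illustration:

from abc import ABC, abstractmethod
from typing import Any


class AskApproach(ABC):
    @abstractmethod
    async def run(self, q: str, overrides: dict[str, Any]) -> Any:
        ...


class EchoApproach(AskApproach):
    # Hypothetical approach: no I/O, it only demonstrates the async contract.
    async def run(self, q: str, overrides: dict[str, Any]) -> Any:
        return {"answer": f"echo: {q}", "data_points": [], "thoughts": ""}


Callers now have to await the dispatch, e.g. result = await approach.run(question, {}).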
tests.test_app/test_chat_with_unknown_approach
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post("/chat", json={"approach": "test"}) <del> response = client.post("/chat", json={"approach": "test"})
# module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): <0> response = client.post("/chat", json={"approach": "test"}) <1> assert response.status_code == 400 <2>
===========unchanged ref 0=========== at: tests.test_app.test_ask_mock_approach result = await response.get_json() ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 5=========== + # module: app.backend.approaches + + ===========changed ref 6=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 7=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... 
+ ===========changed ref 8=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 9=========== + # module: app.backend.main + app = create_app() + ===========changed ref 10=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 11=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 12=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 13=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 14=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app - ===========changed ref 15=========== # module: app.backend.approaches.readretrieveread + class ReadRetrieveReadApproach(AskApproach): - class ReadRetrieveReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - # Not great to keep this as instance state, won't work with interleaving (e.g. 
if using async), but keeps the example simple + retrieve_results = None - self.results = None + async def retrieve_and_store(q: str) -> Any: + nonlocal retrieve_results + retrieve_results, content = await self.retrieve(q, overrides) + return content # Use to capture thought process during iterations cb_handler = HtmlCallbackHandler() cb_manager = CallbackManager(handlers=[cb_handler]) acs_tool = Tool(name="CognitiveSearch", + func=lambda _: 'Not implemented', + coroutine=retrieve_and_store, - func=lambda q: self.retrieve(q, overrides), description=self.CognitiveSearchToolDescription, callbacks=cb_manager) employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager) tools = [acs_tool, employee_tool] prompt = ZeroShotAgent.create_prompt( tools=tools, prefix=overrides.get("prompt_template_prefix") or self.template_prefix, suffix=overrides.get("prompt_template_suffix") or self.template_suffix, input_variables = ["input", "agent_scratchpad"]) llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key) chain = LLMChain(llm = llm, prompt = prompt) agent_exec = AgentExecutor.from_agent_and_tools( + agent = ZeroShotAgent(llm_chain =</s>
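All of the test diffs in this record follow the same shape: the route handlers are coroutines, so the Quart test client calls are awaited and the tests themselves carry pytest.mark.asyncio. A self-contained sketch of that pattern, assuming pytest-asyncio is installed; the toy app below stands in for the real backend:

import pytest
import pytest_asyncio
from quart import Quart, jsonify


@pytest_asyncio.fixture
async def client():
    app = Quart(__name__)

    @app.route("/chat", methods=["POST"])
    async def chat():
        # Toy handler standing in for the real approach dispatch.
        return jsonify({"error": "unknown approach"}), 400

    return app.test_client()


@pytest.mark.asyncio
async def test_chat_with_unknown_approach(client):
    response = await client.post("/chat", json={"approach": "test"})
    assert response.status_code == 400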
tests.test_app/test_chat_mock_approach
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> response = await client.post( <del> response = client.post( <8>:<add> result = await response.get_json() <add> assert result["answer"] == "Paris" <del> assert response.json["answer"] == "Paris"
# module: tests.test_app + @pytest.mark.asyncio + async def test_chat_mock_approach(client): - def test_chat_mock_approach(client): <0> response = client.post( <1> "/chat", <2> json={ <3> "approach": "mock", <4> "history": [{"user": "What is the capital of France?"}], <5> }, <6> ) <7> assert response.status_code == 200 <8> assert response.json["answer"] == "Paris" <9>
===========unchanged ref 0=========== at: _pytest.mark.structures MARK_GEN = MarkGenerator(_ispytest=True) ===========changed ref 0=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 1=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 2=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 3=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 4=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 5=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 6=========== + # module: app.backend.approaches + + ===========changed ref 7=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 8=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... 
+ ===========changed ref 9=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 10=========== + # module: app.backend.main + app = create_app() + ===========changed ref 11=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 12=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 13=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 14=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 15=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app -
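The mock approach that makes this test answer "Paris" is registered in the conftest refs (MockedAskApproach / MockedChatApproach), but its body is not shown in this record. A plausible sketch of what such a mock could look like under the new async interface; this is a guess for illustration, not the repository's actual fixture code:

from typing import Any

from approaches.approach import AskApproach, ChatApproach


class MockedAskApproach(AskApproach):
    async def run(self, q: str, overrides: dict[str, Any]) -> Any:
        return {"answer": "Paris"}


class MockedChatApproach(ChatApproach):
    async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any:
        return {"answer": "Paris"}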
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<19>:<add> chat_completion = await openai.ChatCompletion.acreate( <del> chat_completion = openai.ChatCompletion.create(
# module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(ChatApproach): - class ChatReadRetrieveReadApproach(Approach): + def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: - def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> user_q = 'Generate search query for: ' + history[-1]["user"] <8> <9> # STEP 1: Generate an optimized keyword search query based on the chat history and the last question <10> messages = self.get_messages_from_history( <11> self.query_prompt_template, <12> self.chatgpt_model, <13> history, <14> user_q, <15> self.query_prompt_few_shots, <16> self.chatgpt_token_limit - len(user_q) <17> ) <18> <19> chat_completion = openai.ChatCompletion.create( <20> deployment_id=self.chatgpt_deployment, <21> model=self.chatgpt_model, <22> messages=messages, <23> temperature=0.0, <24> max_tokens=32, <25> n=1) <26> <27> query_text = chat_completion.choices[0].message.content <28> if query_text.strip() == "0": <29> query_text = history[-1]["user"] # Use the last user input if we failed to</s>
===========below chunk 0=========== # module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(ChatApproach): - class ChatReadRetrieveReadApproach(Approach): + def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: - def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 1 # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query # If retrieval mode includes vectors, compute an embedding for the query if has_vector: query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=query_text)["data"][0]["embedding"] else: query_vector = None # Only keep the text query if the retrieval mode uses text, otherwise drop it if not has_text: query_text = None # Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text) if overrides.get("semantic_ranker") and has_text: r = self.search_client.search(query_text, filter=filter, query_type=QueryType.SEMANTIC, query_language="en-us", query_speller="lexicon", semantic_configuration_name="default", top=top, query_caption="extractive|highlight-false" if use_semantic_captions else None, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) else: r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [</s> ===========below chunk 1=========== # module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(ChatApproach): - class ChatReadRetrieveReadApproach(Approach): + def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: - def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 2 <s> vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . 
".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "" # STEP 3: Generate a contextual and content specific answer using the search results and chat history # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>> prompt_override = overrides.get("prompt_override") if prompt_override is None: system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt) elif prompt_override.startswith(">>>"): system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt) else: system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt) messages = self.get_messages_from_</s> ===========below chunk 2=========== # module: app.backend.approaches.chatreadretrieveread + class ChatReadRetrieveReadApproach(ChatApproach): - class ChatReadRetrieveReadApproach(Approach): + def run(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> Any: - def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]) -> Any: # offset: 3 <s> system_message + "\n\nSources:\n" + content, self.chatgpt_model, history, history[-1]["user"], max_tokens=self.chatgpt_token_limit) chat_completion = openai.ChatCompletion.create( deployment_id=self.chatgpt_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.7, max_tokens=1024, n=1) chat_content = chat_completion.choices[0].message.content msg_to_display = '\n\n'.join([str(message) for message in messages]) return {"data_points": results, "answer": chat_content, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')} ===========unchanged ref 0=========== at: app.backend.approaches.chatreadretrieveread.ChatReadRetrieveReadApproach SYSTEM = "system" USER = "user" ASSISTANT = "assistant" system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf]. {follow_up_questions_prompt} {injected_prompt} """ follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook. Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>. Try not to repeat questions that have already been asked. 
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
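The ground-truth edit in this chunk is narrow: the blocking openai.ChatCompletion.create call becomes an awaited acreate call. A stripped-down sketch of that query-generation step, assuming the 0.x openai SDK with Azure credentials already configured at module level; the deployment and model names are placeholders:

import openai


async def generate_search_query(messages: list[dict]) -> str:
    # acreate is the awaitable counterpart of ChatCompletion.create.
    chat_completion = await openai.ChatCompletion.acreate(
        deployment_id="chat",        # placeholder Azure deployment name
        model="gpt-35-turbo",
        messages=messages,
        temperature=0.0,
        max_tokens=32,
        n=1,
    )
    return chat_completion.choices[0].message.content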
app.backend.approaches.chatreadretrieveread/ChatReadRetrieveReadApproach.get_messages_from_history
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<12>:<add> if bot_msg := h.get("bot"): <del> if h.get("bot"): <13>:<add> message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index) <del> message_builder.append_message(self.ASSISTANT, h.get('bot'), index=append_index) <14>:<add> if user_msg := h.get("user"): <add> message_builder.append_message(self.USER, user_msg, index=append_index) <del> message_builder.append_message(self.USER, h.get('user'), index=append_index)
<s>(Approach): + def get_messages_from_history(self, system_prompt: str, model_id: str, history: list[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> list: - def get_messages_from_history(self, system_prompt: str, model_id: str, history: Sequence[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> []: <0> message_builder = MessageBuilder(system_prompt, model_id) <1> <2> # Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message. <3> for shot in few_shots: <4> message_builder.append_message(shot.get('role'), shot.get('content')) <5> <6> user_content = user_conv <7> append_index = len(few_shots) + 1 <8> <9> message_builder.append_message(self.USER, user_content, index=append_index) <10> <11> for h in reversed(history[:-1]): <12> if h.get("bot"): <13> message_builder.append_message(self.ASSISTANT, h.get('bot'), index=append_index) <14> message_builder.append_message(self.USER, h.get('user'), index=append_index) <15> if message_builder.token_length > max_tokens: <16> break <17> <18> messages = message_builder.messages <19> return messages <20>
===========unchanged ref 0=========== at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) ===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 2=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 4=========== + # module: app.backend.main + app = create_app() + ===========changed ref 5=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 9=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 10=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 11=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 13=========== # module: 
tests.test_app + @pytest.mark.asyncio + async def test_chat_mock_approach(client): - def test_chat_mock_approach(client): + response = await client.post( - response = client.post( "/chat", json={ "approach": "mock", "history": [{"user": "What is the capital of France?"}], }, ) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 14=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 15=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 16=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app -
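The change in this record is mostly cosmetic: assignment expressions (bot_msg := ..., user_msg := ...) let the loop skip empty turns cleanly while walking the history backwards. A small sketch of the same walk, assuming the MessageBuilder interface visible in the unchanged ref (append_message, token_length, messages); the conversation data is invented:

from core.messagebuilder import MessageBuilder

history = [
    {"user": "What is included in my plan?", "bot": "Dental and vision."},
    {"user": "Is vision capped?"},  # latest turn, no bot reply yet
]

message_builder = MessageBuilder("You are a helpful assistant.", "gpt-35-turbo")
message_builder.append_message("user", history[-1]["user"], index=1)

for h in reversed(history[:-1]):
    if bot_msg := h.get("bot"):
        message_builder.append_message("assistant", bot_msg, index=1)
    if user_msg := h.get("user"):
        message_builder.append_message("user", user_msg, index=1)
    if message_builder.token_length > 4096:
        break

messages = message_builder.messages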
app.backend.approaches.retrievethenread/RetrieveThenReadApproach.run
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<9>:<add> query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=q))["data"][0]["embedding"] <del> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=q)["data"][0]["embedding"] <14>:<add> query_text = q if has_text else "" <del> query_text = q if has_text else None <18>:<add> r = await self.search_client.search(query_text, <del> r = self.search_client.search(query_text,
# module: app.backend.approaches.retrievethenread + class RetrieveThenReadApproach(AskApproach): - class RetrieveThenReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: <0> has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None] <1> has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None] <2> use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False <3> top = overrides.get("top") or 3 <4> exclude_category = overrides.get("exclude_category") or None <5> filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None <6> <7> # If retrieval mode includes vectors, compute an embedding for the query <8> if has_vector: <9> query_vector = openai.Embedding.create(engine=self.embedding_deployment, input=q)["data"][0]["embedding"] <10> else: <11> query_vector = None <12> <13> # Only keep the text query if the retrieval mode uses text, otherwise drop it <14> query_text = q if has_text else None <15> <16> # Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text) <17> if overrides.get("semantic_ranker") and has_text: <18> r = self.search_client.search(query_text, <19> filter=filter, <20> query_type=QueryType.SEMANTIC, <21> query_language="en-us", <22> query_speller="lexicon", <23> semantic_configuration_name="default", <24> top=top, <25> query_caption="extractive|highlight-false" if use_semantic_captions else None, <26> vector=query_vector, <27> top_k=50 if</s>
===========below chunk 0=========== # module: app.backend.approaches.retrievethenread + class RetrieveThenReadApproach(AskApproach): - class RetrieveThenReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 1 vector_fields="embedding" if query_vector else None) else: r = self.search_client.search(query_text, filter=filter, top=top, vector=query_vector, top_k=50 if query_vector else None, vector_fields="embedding" if query_vector else None) if use_semantic_captions: results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r] else: results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r] content = "\n".join(results) message_builder = MessageBuilder(overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model) # add user question user_content = q + "\n" + f"Sources:\n {content}" message_builder.append_message('user', user_content) # Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message. message_builder.append_message('assistant', self.answer) message_builder.append_message('user', self.question) messages = message_builder.messages chat_completion = openai.ChatCompletion.create( deployment_id=self.openai_deployment, model=self.chatgpt_model, messages=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n</s> ===========below chunk 1=========== # module: app.backend.approaches.retrievethenread + class RetrieveThenReadApproach(AskApproach): - class RetrieveThenReadApproach(Approach): + def run(self, q: str, overrides: dict[str, Any]) -> Any: - def run(self, q: str, overrides: dict[str, Any]) -> Any: # offset: 2 <s>=messages, temperature=overrides.get("temperature") or 0.3, max_tokens=1024, n=1) return {"data_points": results, "answer": chat_completion.choices[0].message.content, "thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>" + '\n\n'.join([str(message) for message in messages])} ===========unchanged ref 0=========== at: app.backend.approaches.retrievethenread.RetrieveThenReadApproach.__init__ self.search_client = search_client self.embedding_deployment = embedding_deployment at: approaches.approach.AskApproach run(self, q: str, overrides: dict[str, Any]) -> Any at: core.messagebuilder MessageBuilder(system_content: str, chatgpt_model: str) at: openai.api_resources.chat_completion ChatCompletion(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.chat_completion.ChatCompletion engine_required = False OBJECT_NAME = "chat.completions" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: openai.api_resources.embedding Embedding(engine: Optional[str]=None, *, id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int]=None, api_base=None, **params) at: openai.api_resources.embedding.Embedding OBJECT_NAME = "embeddings" acreate(api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, /, *, api_key=None, api_base=None, api_type=None, request_id=None, api_version=None, organization=None, **params) at: text 
nonewlines(s: str) -> str ===========unchanged ref 1=========== at: typing.Mapping get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T] get(key: _KT) -> Optional[_VT_co] ===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 2=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 4=========== + # module: app.backend.main + app = create_app() + ===========changed ref 5=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 9=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id)
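Two awaits appear in this record: the embedding request and the search request. A compact sketch of that retrieval step, assuming the async SearchClient from azure.search.documents.aio and the 0.x openai SDK; the vector keyword arguments follow the beta search SDK the sample uses, and the endpoint, index, key, deployment and field names are placeholders:

import openai
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.aio import SearchClient


async def retrieve(q: str) -> list[str]:
    async with SearchClient(
        "https://example.search.windows.net",   # placeholder endpoint
        "example-index",                         # placeholder index name
        AzureKeyCredential("placeholder-key"),
    ) as search_client:
        embedding = await openai.Embedding.acreate(engine="embedding", input=q)
        query_vector = embedding["data"][0]["embedding"]

        r = await search_client.search(
            q, top=3, vector=query_vector, top_k=50, vector_fields="embedding"
        )
        # The async client hands back an async paged iterator.
        return [doc["content"] async for doc in r]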
app.backend.app/index
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> return await bp.send_static_file("index.html") <del> return bp.send_static_file("index.html")
# module: app.backend.app @bp.route("/") + async def index(): - def index(): <0> return bp.send_static_file("index.html") <1>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder='static') ===========changed ref 0=========== + # module: app.backend.approaches + + ===========changed ref 1=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 2=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 4=========== + # module: app.backend.main + app = create_app() + ===========changed ref 5=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 6=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 9=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 10=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 11=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 13=========== # module: 
app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" + timeout = 600 num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 - threads = 1 if num_cpus == 1 else 2 - timeout = 600 - worker_class = "gthread" + worker_class = "uvicorn.workers.UvicornWorker" ===========changed ref 14=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_mock_approach(client): - def test_chat_mock_approach(client): + response = await client.post( - response = client.post( "/chat", json={ "approach": "mock", "history": [{"user": "What is the capital of France?"}], }, ) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 15=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 16=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 17=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app -
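In Quart the static-file helpers are coroutines, which is why each of these small routes gains an await. A tiny standalone sketch of the same pattern; the folder layout here is illustrative:

from quart import Quart, send_from_directory

app = Quart(__name__, static_folder="static")


@app.route("/")
async def index():
    # send_static_file / send_from_directory return coroutines in Quart.
    return await app.send_static_file("index.html")


@app.route("/assets/<path:path>")
async def assets(path):
    return await send_from_directory("static/assets", path)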
app.backend.app/favicon
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> return await bp.send_static_file("favicon.ico") <del> return bp.send_static_file("favicon.ico")
# module: app.backend.app @bp.route("/favicon.ico") + async def favicon(): - def favicon(): <0> return bp.send_static_file("favicon.ico") <1>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder='static') ===========changed ref 0=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 1=========== + # module: app.backend.approaches + + ===========changed ref 2=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 3=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 5=========== + # module: app.backend.main + app = create_app() + ===========changed ref 6=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 7=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 9=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 10=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 11=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 12=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 13=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert 
response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 14=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" + timeout = 600 num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 - threads = 1 if num_cpus == 1 else 2 - timeout = 600 - worker_class = "gthread" + worker_class = "uvicorn.workers.UvicornWorker" ===========changed ref 15=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_mock_approach(client): - def test_chat_mock_approach(client): + response = await client.post( - response = client.post( "/chat", json={ "approach": "mock", "history": [{"user": "What is the capital of France?"}], }, ) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 16=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 17=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 18=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app -
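The gunicorn ref in this record is the deployment half of the port: the worker class switches from gthread to uvicorn's ASGI worker, and main.py (also shown in the refs) now exposes the app object those workers import. A minimal sketch of how the two pieces fit together, assuming uvicorn is installed next to gunicorn; the exact launch command used by the sample may differ:

# main.py -- ASGI entry point imported by the uvicorn workers.
from app import create_app

app = create_app()

# Launched along the lines of:
#   gunicorn -c gunicorn.conf.py main:app
# where gunicorn.conf.py sets worker_class = "uvicorn.workers.UvicornWorker".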
app.backend.app/assets
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<0>:<add> return await send_from_directory("static/assets", path) <del> return send_from_directory("static/assets", path)
# module: app.backend.app @bp.route("/assets/<path:path>") + async def assets(path): - def assets(path): <0> return send_from_directory("static/assets", path) <1>
===========unchanged ref 0=========== at: app.backend.app bp = Blueprint("routes", __name__, static_folder='static') ===========changed ref 0=========== # module: app.backend.app @bp.route("/favicon.ico") + async def favicon(): - def favicon(): + return await bp.send_static_file("favicon.ico") - return bp.send_static_file("favicon.ico") ===========changed ref 1=========== # module: app.backend.app @bp.route("/") + async def index(): - def index(): + return await bp.send_static_file("index.html") - return bp.send_static_file("index.html") ===========changed ref 2=========== + # module: app.backend.approaches + + ===========changed ref 3=========== # module: app.backend.approaches.approach + class AskApproach(ABC): + @abstractmethod + async def run(self, q: str, overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 4=========== # module: app.backend.approaches.approach + class ChatApproach(ABC): + @abstractmethod + async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any: + ... + ===========changed ref 5=========== # module: app.backend.approaches.approach - class Approach: - def run(self, q: str, overrides: dict[str, Any]) -> Any: - raise NotImplementedError - ===========changed ref 6=========== + # module: app.backend.main + app = create_app() + ===========changed ref 7=========== # module: tests.conftest - @pytest.fixture() - def runner(app): - return app.test_cli_runner() - ===========changed ref 8=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_index(client): - def test_index(client): + response = await client.get("/") - response = client.get("/") assert response.status_code == 200 ===========changed ref 9=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_with_unknown_approach(client): - def test_chat_with_unknown_approach(client): + response = await client.post("/chat", json={"approach": "test"}) - response = client.post("/chat", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 10=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_with_unknown_approach(client): - def test_ask_with_unknown_approach(client): + response = await client.post("/ask", json={"approach": "test"}) - response = client.post("/ask", json={"approach": "test"}) assert response.status_code == 400 ===========changed ref 11=========== # module: app.backend.core.modelhelper def get_token_limit(model_id: str) -> int: if model_id not in MODELS_2_TOKEN_LIMITS: raise ValueError("Expected model gpt-35-turbo and above") + return MODELS_2_TOKEN_LIMITS[model_id] - return MODELS_2_TOKEN_LIMITS.get(model_id) ===========changed ref 12=========== # module: tests.conftest + @pytest_asyncio.fixture - @pytest.fixture() + async def client(): - def client(app): + # mock the DefaultAzureCredential + with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: + mock_default_azure_credential.return_value = MockAzureCredential() + quart_app = app.create_app() - return app.test_client() ===========changed ref 13=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_request_must_be_json(client): - def test_chat_request_must_be_json(client): + response = await client.post("/chat") - response = client.post("/chat") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 14=========== # module: 
tests.test_app + @pytest.mark.asyncio + async def test_ask_request_must_be_json(client): - def test_ask_request_must_be_json(client): + response = await client.post("/ask") - response = client.post("/ask") assert response.status_code == 415 + result = await response.get_json() + assert result["error"] == "request must be json" - assert response.json["error"] == "request must be json" ===========changed ref 15=========== # module: app.backend.gunicorn.conf max_requests = 1000 max_requests_jitter = 50 log_file = "-" bind = "0.0.0.0" + timeout = 600 num_cpus = multiprocessing.cpu_count() workers = (num_cpus * 2) + 1 - threads = 1 if num_cpus == 1 else 2 - timeout = 600 - worker_class = "gthread" + worker_class = "uvicorn.workers.UvicornWorker" ===========changed ref 16=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_chat_mock_approach(client): - def test_chat_mock_approach(client): + response = await client.post( - response = client.post( "/chat", json={ "approach": "mock", "history": [{"user": "What is the capital of France?"}], }, ) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 17=========== # module: tests.test_app + @pytest.mark.asyncio + async def test_ask_mock_approach(client): - def test_ask_mock_approach(client): + response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) - response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"}) assert response.status_code == 200 + result = await response.get_json() + assert result["answer"] == "Paris" - assert response.json["answer"] == "Paris" ===========changed ref 18=========== # module: app.backend.approaches.readretrieveread class EmployeeInfoTool(CsvLookupTool): def __init__(self, employee_name: str, callbacks: Callbacks = None): super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks) + self.func = lambda _: 'Not implemented' + self.coroutine = self.employee_info - self.func = self.employee_info self.employee_name = employee_name ===========changed ref 19=========== # module: tests.conftest - @pytest.fixture() - def app(): - # mock the DefaultAzureCredential - with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential: - mock_default_azure_credential.return_value = MockAzureCredential() - _app = backend_app.create_app() - _app.config.update( - { - "TESTING": True, - backend_app.CONFIG_ASK_APPROACHES: {"mock": MockedAskApproach()}, - backend_app.CONFIG_CHAT_APPROACHES: {"mock": MockedChatApproach()}, - } - ) - - yield _app -
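Taken together, the route records hang off a single Blueprint and look their dependencies up on current_app.config, which is also how the tests inject the mock approaches. A rough, illustrative reconstruction of that wiring, using the config key names from the conftest refs; the key values and handler details are assumptions, not the sample's exact code:

from quart import Blueprint, Quart, current_app, jsonify, request

bp = Blueprint("routes", __name__, static_folder="static")

CONFIG_ASK_APPROACHES = "ask_approaches"    # assumed values; the sample defines
CONFIG_CHAT_APPROACHES = "chat_approaches"  # its own constants


@bp.route("/ask", methods=["POST"])
async def ask():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    body = await request.get_json()
    approach = current_app.config[CONFIG_ASK_APPROACHES].get(body.get("approach"))
    if not approach:
        return jsonify({"error": "unknown approach"}), 400
    return jsonify(await approach.run(body.get("question"), body.get("overrides") or {}))


def create_app() -> Quart:
    app = Quart(__name__)
    app.register_blueprint(bp)
    return app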
app.backend.app/content_file
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<1>:<add> blob = await blob_container.get_blob_client(path).download_blob() <del> blob = blob_container.get_blob_client(path).download_blob()
<8>:<add> await blob.readinto(blob_file) <del> blob.readinto(blob_file)
<10>:<add> return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path) <del> return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path)
# module: app.backend.app
# Serve content files from blob storage from within the app to keep the example self-contained.
# *** NOTE *** this assumes that the content files are public, or at least that all users of the app
# can access all the files. This is also slow and memory hungry.
@bp.route("/content/<path>")
+ async def content_file(path):
- def content_file(path):
<0> blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER)
<1> blob = blob_container.get_blob_client(path).download_blob()
<2> if not blob.properties or not blob.properties.has_key("content_settings"):
<3>     abort(404)
<4> mime_type = blob.properties["content_settings"]["content_type"]
<5> if mime_type == "application/octet-stream":
<6>     mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
<7> blob_file = io.BytesIO()
<8> blob.readinto(blob_file)
<9> blob_file.seek(0)
<10> return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path)
<11>
===========unchanged ref 0===========
at: app.backend.app
    AZURE_STORAGE_CONTAINER = os.getenv("AZURE_STORAGE_CONTAINER", "content")
    CONFIG_BLOB_CLIENT = "blob_client"
    bp = Blueprint("routes", __name__, static_folder='static')
at: io
    BytesIO(initial_bytes: bytes=...)
at: io.BytesIO
    seek(self, offset: int, whence: int=..., /) -> int
at: mimetypes
    guess_type(url: Union[Text, PathLike[str]], strict: bool=...) -> Tuple[Optional[str], Optional[str]]
===========changed ref 0===========
# module: app.backend.app
@bp.route("/assets/<path:path>")
+ async def assets(path):
- def assets(path):
+     return await send_from_directory("static/assets", path)
-     return send_from_directory("static/assets", path)
===========changed ref 1===========
# module: app.backend.app
@bp.route("/favicon.ico")
+ async def favicon():
- def favicon():
+     return await bp.send_static_file("favicon.ico")
-     return bp.send_static_file("favicon.ico")
===========changed ref 2===========
# module: app.backend.app
@bp.route("/")
+ async def index():
- def index():
+     return await bp.send_static_file("index.html")
-     return bp.send_static_file("index.html")
===========changed ref 3===========
+ # module: app.backend.approaches
+
+
===========changed ref 4===========
# module: app.backend.approaches.approach
+ class AskApproach(ABC):
+     @abstractmethod
+     async def run(self, q: str, overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 5===========
# module: app.backend.approaches.approach
+ class ChatApproach(ABC):
+     @abstractmethod
+     async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 6===========
# module: app.backend.approaches.approach
- class Approach:
-     def run(self, q: str, overrides: dict[str, Any]) -> Any:
-         raise NotImplementedError
-
===========changed ref 7===========
+ # module: app.backend.main
+ app = create_app()
+
===========changed ref 8===========
# module: tests.conftest
- @pytest.fixture()
- def runner(app):
-     return app.test_cli_runner()
-
===========changed ref 9===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_index(client):
- def test_index(client):
+     response = await client.get("/")
-     response = client.get("/")
    assert response.status_code == 200
===========changed ref 10===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_with_unknown_approach(client):
- def test_chat_with_unknown_approach(client):
+     response = await client.post("/chat", json={"approach": "test"})
-     response = client.post("/chat", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 11===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_with_unknown_approach(client):
- def test_ask_with_unknown_approach(client):
+     response = await client.post("/ask", json={"approach": "test"})
-     response = client.post("/ask", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 12===========
# module: app.backend.core.modelhelper
def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected model gpt-35-turbo and above")
+     return MODELS_2_TOKEN_LIMITS[model_id]
-     return MODELS_2_TOKEN_LIMITS.get(model_id)
===========changed ref 13===========
# module: tests.conftest
+ @pytest_asyncio.fixture
- @pytest.fixture()
+ async def client():
- def client(app):
+     # mock the DefaultAzureCredential
+     with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
+         mock_default_azure_credential.return_value = MockAzureCredential()
+         quart_app = app.create_app()
-     return app.test_client()
===========changed ref 14===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_request_must_be_json(client):
- def test_chat_request_must_be_json(client):
+     response = await client.post("/chat")
-     response = client.post("/chat")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"
===========changed ref 15===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_request_must_be_json(client):
- def test_ask_request_must_be_json(client):
+     response = await client.post("/ask")
-     response = client.post("/ask")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"
===========changed ref 16===========
# module: app.backend.gunicorn.conf
max_requests = 1000
max_requests_jitter = 50
log_file = "-"
bind = "0.0.0.0"
+ timeout = 600
num_cpus = multiprocessing.cpu_count()
workers = (num_cpus * 2) + 1
- threads = 1 if num_cpus == 1 else 2
- timeout = 600
- worker_class = "gthread"
+ worker_class = "uvicorn.workers.UvicornWorker"
===========changed ref 17===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_mock_approach(client):
- def test_chat_mock_approach(client):
+     response = await client.post(
-     response = client.post(
        "/chat",
        json={
            "approach": "mock",
            "history": [{"user": "What is the capital of France?"}],
        },
    )
    assert response.status_code == 200
+     result = await response.get_json()
+     assert result["answer"] == "Paris"
-     assert response.json["answer"] == "Paris"
===========changed ref 18===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_mock_approach(client):
- def test_ask_mock_approach(client):
+     response = await client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"})
-     response = client.post("/ask", json={"approach": "mock", "question": "What is the capital of France?"})
    assert response.status_code == 200
+     result = await response.get_json()
+     assert result["answer"] == "Paris"
-     assert response.json["answer"] == "Paris"
===========changed ref 19===========
# module: app.backend.approaches.readretrieveread
class EmployeeInfoTool(CsvLookupTool):
    def __init__(self, employee_name: str, callbacks: Callbacks = None):
        super().__init__(filename="data/employeeinfo.csv", key_field="name", name="Employee", description="useful for answering questions about the employee, their benefits and other personal information", callbacks=callbacks)
+         self.func = lambda _: 'Not implemented'
+         self.coroutine = self.employee_info
-         self.func = self.employee_info
        self.employee_name = employee_name
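For orientation only (this is an editorial sketch, not part of the commit record above), applying the <1>, <8> and <10> edits to the pre-edit body gives the handler below. The quart import line is an assumption added for self-containment; bp, CONFIG_BLOB_CLIENT and AZURE_STORAGE_CONTAINER are the module-level names listed in the unchanged ref, not new code.

import io
import mimetypes

from quart import abort, current_app, send_file  # assumed imports; Quart mirrors the Flask helpers used here

@bp.route("/content/<path>")
async def content_file(path):
    # Under Quart the blob download, the read into memory, and send_file are all awaited.
    blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER)
    blob = await blob_container.get_blob_client(path).download_blob()
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
    await blob.readinto(blob_file)
    blob_file.seek(0)
    return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)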
app.backend.app/ask
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<2>:<add> request_json = await request.get_json() <add> approach = request_json["approach"] <del> approach = request.json["approach"]
<7>:<add> r = await impl.run(request_json["question"], request_json.get("overrides") or {}) <del> r = impl.run(request.json["question"], request.json.get("overrides") or {})
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
+ async def ask():
- def ask():
<0> if not request.is_json:
<1>     return jsonify({"error": "request must be json"}), 415
<2> approach = request.json["approach"]
<3> try:
<4>     impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach)
<5>     if not impl:
<6>         return jsonify({"error": "unknown approach"}), 400
<7>     r = impl.run(request.json["question"], request.json.get("overrides") or {})
<8>     return jsonify(r)
<9> except Exception as e:
<10>     logging.exception("Exception in /ask")
<11>     return jsonify({"error": str(e)}), 500
<12>
===========unchanged ref 0===========
at: app.backend.app
    CONFIG_ASK_APPROACHES = "ask_approaches"
    bp = Blueprint("routes", __name__, static_folder='static')
at: logging
    exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
===========changed ref 0===========
# module: app.backend.app
@bp.route("/assets/<path:path>")
+ async def assets(path):
- def assets(path):
+     return await send_from_directory("static/assets", path)
-     return send_from_directory("static/assets", path)
===========changed ref 1===========
# module: app.backend.app
@bp.route("/favicon.ico")
+ async def favicon():
- def favicon():
+     return await bp.send_static_file("favicon.ico")
-     return bp.send_static_file("favicon.ico")
===========changed ref 2===========
# module: app.backend.app
@bp.route("/")
+ async def index():
- def index():
+     return await bp.send_static_file("index.html")
-     return bp.send_static_file("index.html")
===========changed ref 3===========
# module: app.backend.app
# Serve content files from blob storage from within the app to keep the example self-contained.
# *** NOTE *** this assumes that the content files are public, or at least that all users of the app
# can access all the files. This is also slow and memory hungry.
@bp.route("/content/<path>")
+ async def content_file(path):
- def content_file(path):
    blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER)
+     blob = await blob_container.get_blob_client(path).download_blob()
-     blob = blob_container.get_blob_client(path).download_blob()
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
+     await blob.readinto(blob_file)
-     blob.readinto(blob_file)
    blob_file.seek(0)
+     return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
-     return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path)
===========changed ref 4===========
+ # module: app.backend.approaches
+
+
===========changed ref 5===========
# module: app.backend.approaches.approach
+ class AskApproach(ABC):
+     @abstractmethod
+     async def run(self, q: str, overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 6===========
# module: app.backend.approaches.approach
+ class ChatApproach(ABC):
+     @abstractmethod
+     async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 7===========
# module: app.backend.approaches.approach
- class Approach:
-     def run(self, q: str, overrides: dict[str, Any]) -> Any:
-         raise NotImplementedError
-
===========changed ref 8===========
+ # module: app.backend.main
+ app = create_app()
+
===========changed ref 9===========
# module: tests.conftest
- @pytest.fixture()
- def runner(app):
-     return app.test_cli_runner()
-
===========changed ref 10===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_index(client):
- def test_index(client):
+     response = await client.get("/")
-     response = client.get("/")
    assert response.status_code == 200
===========changed ref 11===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_with_unknown_approach(client):
- def test_chat_with_unknown_approach(client):
+     response = await client.post("/chat", json={"approach": "test"})
-     response = client.post("/chat", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 12===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_with_unknown_approach(client):
- def test_ask_with_unknown_approach(client):
+     response = await client.post("/ask", json={"approach": "test"})
-     response = client.post("/ask", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 13===========
# module: app.backend.core.modelhelper
def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected model gpt-35-turbo and above")
+     return MODELS_2_TOKEN_LIMITS[model_id]
-     return MODELS_2_TOKEN_LIMITS.get(model_id)
===========changed ref 14===========
# module: tests.conftest
+ @pytest_asyncio.fixture
- @pytest.fixture()
+ async def client():
- def client(app):
+     # mock the DefaultAzureCredential
+     with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
+         mock_default_azure_credential.return_value = MockAzureCredential()
+         quart_app = app.create_app()
-     return app.test_client()
===========changed ref 15===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_request_must_be_json(client):
- def test_chat_request_must_be_json(client):
+     response = await client.post("/chat")
-     response = client.post("/chat")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"
===========changed ref 16===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_request_must_be_json(client):
- def test_ask_request_must_be_json(client):
+     response = await client.post("/ask")
-     response = client.post("/ask")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"
===========changed ref 17===========
# module: app.backend.gunicorn.conf
max_requests = 1000
max_requests_jitter = 50
log_file = "-"
bind = "0.0.0.0"
+ timeout = 600
num_cpus = multiprocessing.cpu_count()
workers = (num_cpus * 2) + 1
- threads = 1 if num_cpus == 1 else 2
- timeout = 600
- worker_class = "gthread"
+ worker_class = "uvicorn.workers.UvicornWorker"
===========changed ref 18===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_mock_approach(client):
- def test_chat_mock_approach(client):
+     response = await client.post(
-     response = client.post(
        "/chat",
        json={
            "approach": "mock",
            "history": [{"user": "What is the capital of France?"}],
        },
    )
    assert response.status_code == 200
+     result = await response.get_json()
+     assert result["answer"] == "Paris"
-     assert response.json["answer"] == "Paris"
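As a quick illustration (an editorial sketch, not data from the commit), applying the <2> and <7> edits to the pre-edit ask() yields roughly the following Quart handler. The quart import line is an assumption for self-containment; bp and CONFIG_ASK_APPROACHES are the module-level names listed in the unchanged ref.

import logging

from quart import current_app, jsonify, request  # assumed imports

@bp.route("/ask", methods=["POST"])
async def ask():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
    # Under Quart the request body must be awaited before it is read.
    request_json = await request.get_json()
    approach = request_json["approach"]
    try:
        impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach)
        if not impl:
            return jsonify({"error": "unknown approach"}), 400
        # Approach implementations are coroutines after the port, so run() is awaited as well.
        r = await impl.run(request_json["question"], request_json.get("overrides") or {})
        return jsonify(r)
    except Exception as e:
        logging.exception("Exception in /ask")
        return jsonify({"error": str(e)}), 500

The /chat record that follows applies the same two-step pattern (await get_json(), then await the approach's run) to the conversation history.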
app.backend.app/chat
Modified
Azure-Samples~azure-search-openai-demo
9da71efaebcb92ab6d73546f303e34f772088534
Port to Quart (#503)
<2>:<add> request_json = await request.get_json() <add> approach = request_json["approach"] <del> approach = request.json["approach"]
<7>:<add> r = await impl.run(request_json["history"], request_json.get("overrides") or {}) <del> r = impl.run(request.json["history"], request.json.get("overrides") or {})
# module: app.backend.app
@bp.route("/chat", methods=["POST"])
+ async def chat():
- def chat():
<0> if not request.is_json:
<1>     return jsonify({"error": "request must be json"}), 415
<2> approach = request.json["approach"]
<3> try:
<4>     impl = current_app.config[CONFIG_CHAT_APPROACHES].get(approach)
<5>     if not impl:
<6>         return jsonify({"error": "unknown approach"}), 400
<7>     r = impl.run(request.json["history"], request.json.get("overrides") or {})
<8>     return jsonify(r)
<9> except Exception as e:
<10>     logging.exception("Exception in /chat")
<11>     return jsonify({"error": str(e)}), 500
<12>
===========unchanged ref 0===========
at: app.backend.app
    CONFIG_CHAT_APPROACHES = "chat_approaches"
    bp = Blueprint("routes", __name__, static_folder='static')
at: logging
    exception(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
===========changed ref 0===========
# module: app.backend.app
@bp.route("/assets/<path:path>")
+ async def assets(path):
- def assets(path):
+     return await send_from_directory("static/assets", path)
-     return send_from_directory("static/assets", path)
===========changed ref 1===========
# module: app.backend.app
@bp.route("/favicon.ico")
+ async def favicon():
- def favicon():
+     return await bp.send_static_file("favicon.ico")
-     return bp.send_static_file("favicon.ico")
===========changed ref 2===========
# module: app.backend.app
@bp.route("/")
+ async def index():
- def index():
+     return await bp.send_static_file("index.html")
-     return bp.send_static_file("index.html")
===========changed ref 3===========
# module: app.backend.app
@bp.route("/ask", methods=["POST"])
+ async def ask():
- def ask():
    if not request.is_json:
        return jsonify({"error": "request must be json"}), 415
+     request_json = await request.get_json()
+     approach = request_json["approach"]
-     approach = request.json["approach"]
    try:
        impl = current_app.config[CONFIG_ASK_APPROACHES].get(approach)
        if not impl:
            return jsonify({"error": "unknown approach"}), 400
+         r = await impl.run(request_json["question"], request_json.get("overrides") or {})
-         r = impl.run(request.json["question"], request.json.get("overrides") or {})
        return jsonify(r)
    except Exception as e:
        logging.exception("Exception in /ask")
        return jsonify({"error": str(e)}), 500
===========changed ref 4===========
# module: app.backend.app
# Serve content files from blob storage from within the app to keep the example self-contained.
# *** NOTE *** this assumes that the content files are public, or at least that all users of the app
# can access all the files. This is also slow and memory hungry.
@bp.route("/content/<path>")
+ async def content_file(path):
- def content_file(path):
    blob_container = current_app.config[CONFIG_BLOB_CLIENT].get_container_client(AZURE_STORAGE_CONTAINER)
+     blob = await blob_container.get_blob_client(path).download_blob()
-     blob = blob_container.get_blob_client(path).download_blob()
    if not blob.properties or not blob.properties.has_key("content_settings"):
        abort(404)
    mime_type = blob.properties["content_settings"]["content_type"]
    if mime_type == "application/octet-stream":
        mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
    blob_file = io.BytesIO()
+     await blob.readinto(blob_file)
-     blob.readinto(blob_file)
    blob_file.seek(0)
+     return await send_file(blob_file, mimetype=mime_type, as_attachment=False, attachment_filename=path)
-     return send_file(blob_file, mimetype=mime_type, as_attachment=False, download_name=path)
===========changed ref 5===========
+ # module: app.backend.approaches
+
+
===========changed ref 6===========
# module: app.backend.approaches.approach
+ class AskApproach(ABC):
+     @abstractmethod
+     async def run(self, q: str, overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 7===========
# module: app.backend.approaches.approach
+ class ChatApproach(ABC):
+     @abstractmethod
+     async def run(self, history: list[dict], overrides: dict[str, Any]) -> Any:
+         ...
+
===========changed ref 8===========
# module: app.backend.approaches.approach
- class Approach:
-     def run(self, q: str, overrides: dict[str, Any]) -> Any:
-         raise NotImplementedError
-
===========changed ref 9===========
+ # module: app.backend.main
+ app = create_app()
+
===========changed ref 10===========
# module: tests.conftest
- @pytest.fixture()
- def runner(app):
-     return app.test_cli_runner()
-
===========changed ref 11===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_index(client):
- def test_index(client):
+     response = await client.get("/")
-     response = client.get("/")
    assert response.status_code == 200
===========changed ref 12===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_with_unknown_approach(client):
- def test_chat_with_unknown_approach(client):
+     response = await client.post("/chat", json={"approach": "test"})
-     response = client.post("/chat", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 13===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_with_unknown_approach(client):
- def test_ask_with_unknown_approach(client):
+     response = await client.post("/ask", json={"approach": "test"})
-     response = client.post("/ask", json={"approach": "test"})
    assert response.status_code == 400
===========changed ref 14===========
# module: app.backend.core.modelhelper
def get_token_limit(model_id: str) -> int:
    if model_id not in MODELS_2_TOKEN_LIMITS:
        raise ValueError("Expected model gpt-35-turbo and above")
+     return MODELS_2_TOKEN_LIMITS[model_id]
-     return MODELS_2_TOKEN_LIMITS.get(model_id)
===========changed ref 15===========
# module: tests.conftest
+ @pytest_asyncio.fixture
- @pytest.fixture()
+ async def client():
- def client(app):
+     # mock the DefaultAzureCredential
+     with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
+         mock_default_azure_credential.return_value = MockAzureCredential()
+         quart_app = app.create_app()
-     return app.test_client()
===========changed ref 16===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_chat_request_must_be_json(client):
- def test_chat_request_must_be_json(client):
+     response = await client.post("/chat")
-     response = client.post("/chat")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"
===========changed ref 17===========
# module: tests.test_app
+ @pytest.mark.asyncio
+ async def test_ask_request_must_be_json(client):
- def test_ask_request_must_be_json(client):
+     response = await client.post("/ask")
-     response = client.post("/ask")
    assert response.status_code == 415
+     result = await response.get_json()
+     assert result["error"] == "request must be json"
-     assert response.json["error"] == "request must be json"