Source code for langchain.chains.llm_summarization_checker.base
def _load_sequential_chain(
    llm: BaseLanguageModel,
    create_assertions_prompt: PromptTemplate,
    check_assertions_prompt: PromptTemplate,
    revised_summary_prompt: PromptTemplate,
    are_all_true_prompt: PromptTemplate,
    verbose: bool = False,
) -> SequentialChain:
    chain = SequentialChain(
        chains=[
            LLMChain(
                llm=llm,
                prompt=create_assertions_prompt,
                output_key="assertions",
                verbose=verbose,
            ),
            LLMChain(
                llm=llm,
                prompt=check_assertions_prompt,
                output_key="checked_assertions",
                verbose=verbose,
            ),
            LLMChain(
                llm=llm,
                prompt=revised_summary_prompt,
                output_key="revised_summary",
                verbose=verbose,
            ),
            LLMChain(
                llm=llm,
                output_key="all_true",
                prompt=are_all_true_prompt,
                verbose=verbose,
            ),
        ],
        input_variables=["summary"],
        output_variables=["all_true", "revised_summary"],
        verbose=verbose,
    )
    return chain
class LLMSummarizationCheckerChain(Chain):
"""Chain for summarization with self-verification.
Example:
.. code-block:: python
from langchain import OpenAI, LLMSummarizationCheckerChain
llm = OpenAI(temperature=0.0)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm)
"""
sequential_chain: SequentialChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT
"""[Deprecated]"""
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
"""[Deprecated]"""
revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT
"""[Deprecated]"""
are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT
"""[Deprecated]"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
max_checks: int = 2
"""Maximum number of times to check the assertions. Default to double-checking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMSummarizationCheckerChain with an llm is "
"deprecated. Please instantiate with"
" sequential_chain argument or using the from_llm class method."
)
if "sequential_chain" not in values and values["llm"] is not None:
values["sequential_chain"] = _load_sequential_chain(
values["llm"],
values.get("create_assertions_prompt", CREATE_ASSERTIONS_PROMPT),
values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
values.get("revised_summary_prompt", REVISED_SUMMARY_PROMPT),
values.get("are_all_true_prompt", ARE_ALL_TRUE_PROMPT),
verbose=values.get("verbose", False),
)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
all_true = False
count = 0
output = None
original_input = inputs[self.input_key]
chain_input = original_input
while not all_true and count < self.max_checks:
output = self.sequential_chain(
{"summary": chain_input}, callbacks=_run_manager.get_child()
)
count += 1
if output["all_true"].strip() == "True":
break
if self.verbose:
print(output["revised_summary"])
chain_input = output["revised_summary"]
if not output:
raise ValueError("No output from chain")
return {self.output_key: output["revised_summary"].strip()}
@property
def _chain_type(self) -> str:
return "llm_summarization_checker_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT,
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT,
are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT,
verbose: bool = False,
**kwargs: Any,
) -> LLMSummarizationCheckerChain:
chain = _load_sequential_chain(
llm,
create_assertions_prompt,
check_assertions_prompt,
revised_summary_prompt,
are_all_true_prompt,
verbose=verbose,
)
return cls(sequential_chain=chain, verbose=verbose, **kwargs)
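Example usage (a minimal sketch; the summary text is illustrative and an OpenAI API key is assumed to be configured):
.. code-block:: python

    from langchain import OpenAI, LLMSummarizationCheckerChain

    llm = OpenAI(temperature=0.0)
    checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)
    # The sequential chain creates assertions, checks them, revises the
    # summary, and repeats until the "all_true" step returns "True" or
    # max_checks is exhausted; the final revised summary is returned.
    revised = checker_chain.run("The sun orbits the Earth once every 365 days.")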
Source code for langchain.chains.combine_documents.base
"""Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
def format_document(doc: Document, prompt: BasePromptTemplate) -> str:
"""Format a document into a string based on a prompt template."""
base_info = {"page_content": doc.page_content}
base_info.update(doc.metadata)
missing_metadata = set(prompt.input_variables).difference(base_info)
if len(missing_metadata) > 0:
required_metadata = [
iv for iv in prompt.input_variables if iv != "page_content"
]
raise ValueError(
f"Document prompt requires documents to have metadata variables: "
f"{required_metadata}. Received document with missing metadata: "
f"{list(missing_metadata)}."
)
document_info = {k: base_info[k] for k in prompt.input_variables}
return prompt.format(**document_info)
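For illustration, a minimal sketch of calling format_document (the prompt and document below are made up):
.. code-block:: python

    from langchain.docstore.document import Document
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["page_content", "source"],
        template="{page_content}\nSource: {source}",
    )
    doc = Document(page_content="LangChain composes LLM calls.", metadata={"source": "notes.txt"})
    # Metadata supplies every prompt variable other than page_content;
    # a missing metadata key raises the ValueError above.
    print(format_document(doc, prompt))  # -> "LangChain composes LLM calls.\nSource: notes.txt"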
class BaseCombineDocumentsChain(Chain, ABC):
"""Base interface for chains combining documents."""
input_key: str = "input_documents" #: :meta private:
output_key: str = "output_text" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Return the prompt length given the documents passed in.
Returns None if the method does not depend on the prompt length.
"""
return None
@abstractmethod
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Combine documents into a single string."""
@abstractmethod
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents into a single string asynchronously."""
def _call(
self,
inputs: Dict[str, List[Document]],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
docs = inputs[self.input_key]
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
output, extra_return_dict = self.combine_docs(
docs, callbacks=_run_manager.get_child(), **other_keys
)
extra_return_dict[self.output_key] = output
return extra_return_dict
async def _acall(
self,
inputs: Dict[str, List[Document]],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
docs = inputs[self.input_key]
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
output, extra_return_dict = await self.acombine_docs(
docs, callbacks=_run_manager.get_child(), **other_keys
)
extra_return_dict[self.output_key] = output
return extra_return_dict
class AnalyzeDocumentChain(Chain):
"""Chain that splits a document, then analyzes the pieces."""
input_key: str = "input_document" #: :meta private:
text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter)
combine_docs_chain: BaseCombineDocumentsChain
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return self.combine_docs_chain.output_keys
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
document = inputs[self.input_key]
docs = self.text_splitter.create_documents([document])
# Other keys are assumed to be needed for LLM prediction
other_keys: Dict = {k: v for k, v in inputs.items() if k != self.input_key}
other_keys[self.combine_docs_chain.input_key] = docs
return self.combine_docs_chain(
other_keys, return_only_outputs=True, callbacks=_run_manager.get_child()
)
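Example usage (a minimal sketch; long_text is an illustrative placeholder, and the QA chain could be any BaseCombineDocumentsChain):
.. code-block:: python

    from langchain.chains import AnalyzeDocumentChain
    from langchain.chains.question_answering import load_qa_chain
    from langchain.llms import OpenAI

    long_text = "..."  # any document too large for a single prompt
    qa_chain = load_qa_chain(OpenAI(), chain_type="map_reduce")
    analyze = AnalyzeDocumentChain(combine_docs_chain=qa_chain)
    # The text is split with RecursiveCharacterTextSplitter by default,
    # and the resulting pieces are handed to the combine-documents chain.
    analyze.run(input_document=long_text, question="What is the main topic?")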
Source code for langchain.chains.sql_database.base
"""Chain for interacting with SQL Database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.sql_database import SQLDatabase
from langchain.tools.sql_database.prompt import QUERY_CHECKER
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
class SQLDatabaseChain(Chain):
"""Chain for interacting with SQL Database.
Example:
.. code-block:: python
from langchain import SQLDatabaseChain, OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
database: SQLDatabase = Field(exclude=True)
"""SQL Database to connect to."""
prompt: Optional[BasePromptTemplate] = None
"""[Deprecated] Prompt to use to translate natural language to SQL."""
top_k: int = 5
"""Number of results to return from the query"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an SQLDatabaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
sql_cmd = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
if not self.use_query_checker:
_run_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
intermediate_steps.append(
sql_cmd
) # output: sql generation (no checker)
intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec
result = self.database.run(sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm, prompt=query_checker_prompt
)
query_checker_inputs = {
"query": sql_cmd,
"dialect": self.database.dialect,
}
checked_sql_command: str = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
).strip()
intermediate_steps.append(
checked_sql_command
) # output: sql generation (checker)
_run_manager.on_text(
checked_sql_command, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_sql_command}
) # input: sql exec
result = self.database.run(checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(result, color="yellow", verbose=self.verbose)
# If return direct, we just set the final result equal to
# the result of the sql query result, otherwise try to get a human readable
# final answer
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "sql_database_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabase,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> SQLDatabaseChain:
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, **kwargs)
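Example usage (a minimal sketch; the SQLite URI and question are illustrative):
.. code-block:: python

    from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

    db = SQLDatabase.from_uri("sqlite:///example.db")
    db_chain = SQLDatabaseChain.from_llm(
        OpenAI(temperature=0),
        db,
        use_query_checker=True,  # ask the LLM to review the SQL before running it
        return_intermediate_steps=True,
    )
    result = db_chain("How many employees are there?")
    # result["intermediate_steps"] alternates the inputs and outputs of the
    # SQL-generation, query-check, SQL-execution, and final-answer steps.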
class SQLDatabaseSequentialChain(Chain):
"""Chain for querying a SQL database, implemented as a sequential chain.
The chain is as follows:
1. Based on the query, determine which tables to use.
2. Based on those tables, call the normal SQL database chain.
This is useful in cases where the number of tables in the database is large.
"""
decider_chain: LLMChain
sql_chain: SQLDatabaseChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
database: SQLDatabase,
query_prompt: BasePromptTemplate = PROMPT,
decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
**kwargs: Any,
) -> SQLDatabaseSequentialChain:
"""Load the necessary chains."""
sql_chain = SQLDatabaseChain.from_llm(
llm, database, prompt=query_prompt, **kwargs
)
decider_chain = LLMChain(
llm=llm, prompt=decider_prompt, output_key="table_names"
)
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ", ".join(_table_names)
llm_inputs = {
"query": inputs[self.input_key],
"table_names": table_names,
}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [
name
for name in table_names_from_chain
if name.lower() in _lowercased_table_names
]
_run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(
new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"
Source code for langchain.chains.hyde.base
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from pydantic import Extra
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
from langchain.embeddings.base import Embeddings
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate a hypothetical document for the query, then embed it.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
return self.llm_chain.output_keys
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embed it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain(inputs, callbacks=_run_manager.get_child())
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: str,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain for a specific prompt key."""
prompt = PROMPT_MAP[prompt_key]
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
Source code for langchain.chains.qa_with_sources.base
"""Question answering with sources over documents."""
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.qa_with_sources.map_reduce_prompt import (
COMBINE_PROMPT,
EXAMPLE_PROMPT,
QUESTION_PROMPT,
)
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
class BaseQAWithSourcesChain(Chain, ABC):
"""Question answering with sources over documents."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
question_key: str = "question" #: :meta private:
input_docs_key: str = "docs" #: :meta private:
answer_key: str = "answer" #: :meta private:
sources_answer_key: str = "sources" #: :meta private:
return_source_documents: bool = False
"""Return the source documents."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
question_prompt: BasePromptTemplate = QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Construct the chain from an LLM."""
llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
combine_results_chain = StuffDocumentsChain(
llm_chain=llm_combine_chain,
document_prompt=document_prompt,
document_variable_name="summaries",
)
combine_document_chain = MapReduceDocumentsChain(
llm_chain=llm_question_chain,
combine_document_chain=combine_results_chain,
document_variable_name="context",
)
return cls(
combine_documents_chain=combine_document_chain,
**kwargs,
)
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseQAWithSourcesChain:
"""Load chain from chain type."""
_chain_kwargs = chain_type_kwargs or {}
combine_document_chain = load_qa_with_sources_chain(
llm, chain_type=chain_type, **_chain_kwargs
)
return cls(combine_documents_chain=combine_document_chain, **kwargs)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
_output_keys = [self.answer_key, self.sources_answer_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
return _output_keys
@root_validator(pre=True)
def validate_naming(cls, values: Dict) -> Dict:
"""Fix backwards compatability in naming."""
if "combine_document_chain" in values:
values["combine_documents_chain"] = values.pop("combine_document_chain")
return values
@abstractmethod
def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
"""Get docs to run questioning over."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
docs = self._get_docs(inputs)
answer = self.combine_documents_chain.run(
input_documents=docs, callbacks=_run_manager.get_child(), **inputs
)
if re.search(r"SOURCES:\s", answer):
answer, sources = re.split(r"SOURCES:\s", answer)
else:
sources = ""
result: Dict[str, Any] = {
self.answer_key: answer,
self.sources_answer_key: sources,
}
if self.return_source_documents:
result["source_documents"] = docs
return result
@abstractmethod
async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
"""Get docs to run questioning over."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
docs = await self._aget_docs(inputs)
answer = await self.combine_documents_chain.arun(
input_documents=docs, callbacks=_run_manager.get_child(), **inputs
)
if re.search(r"SOURCES:\s", answer):
answer, sources = re.split(r"SOURCES:\s", answer)
else:
sources = ""
result: Dict[str, Any] = {
self.answer_key: answer,
self.sources_answer_key: sources,
}
if self.return_source_documents:
result["source_documents"] = docs
return result
class QAWithSourcesChain(BaseQAWithSourcesChain):
"""Question answering with sources over documents."""
input_docs_key: str = "docs" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_docs_key, self.question_key]
def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
return inputs.pop(self.input_docs_key)
async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
return inputs.pop(self.input_docs_key)
@property
def _chain_type(self) -> str:
return "qa_with_sources_chain"
Source code for langchain.chains.qa_with_sources.vector_db
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from pydantic import Field, root_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and only if reduce_k_below_max_tokens is set to true"""
search_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain.llm.get_num_tokens(
doc.page_content
)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
Source code for langchain.chains.qa_with_sources.retrieval
"""Question-answering with sources over an index."""
from typing import Any, Dict, List
from pydantic import Field
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over an index."""
retriever: BaseRetriever = Field(exclude=True)
"""Index to connect to."""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentsChain and only if reduce_k_below_max_tokens is set to true"""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain.llm.get_num_tokens(
doc.page_content
)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
question = inputs[self.question_key]
docs = self.retriever.get_relevant_documents(question)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
question = inputs[self.question_key]
docs = await self.retriever.aget_relevant_documents(question)
return self._reduce_tokens_below_limit(docs)
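Example usage (a minimal sketch; assumes the faiss package is installed, and the indexed text is illustrative):
.. code-block:: python

    from langchain import OpenAI
    from langchain.chains import RetrievalQAWithSourcesChain
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    store = FAISS.from_texts(
        ["Paris is the capital of France."],
        OpenAIEmbeddings(),
        metadatas=[{"source": "wiki-1"}],
    )
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        OpenAI(), retriever=store.as_retriever()
    )
    chain({"question": "What is the capital of France?"})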
Source code for langchain.chains.llm_math.base
"""Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations
import math
import re
import warnings
from typing import Any, Dict, List, Optional
import numexpr
from pydantic import Extra, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_math.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain.from_llm(OpenAI())
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
prompt: BasePromptTemplate = PROMPT
"""[Deprecated] Prompt to use to translate to python if necessary."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMMathChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
prompt = values.get("prompt", PROMPT)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _evaluate_expression(self, expression: str) -> str:
try:
local_dict = {"pi": math.pi, "e": math.e}
output = str(
numexpr.evaluate(
expression.strip(),
global_dict={}, # restrict access to globals
local_dict=local_dict, # add common mathematical functions
)
)
except Exception as e:
raise ValueError(
f'LLMMathChain._evaluate("{expression}") raised error: {e}.'
" Please try again with a valid numerical expression"
)
# Remove any leading and trailing brackets from the output
return re.sub(r"^\[|\]$", "", output)
def _process_llm_result(
self, llm_output: str, run_manager: CallbackManagerForChainRun
) -> Dict[str, str]:
run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text("\nAnswer: ", verbose=self.verbose)
run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
async def _aprocess_llm_result(
self,
llm_output: str,
run_manager: AsyncCallbackManagerForChainRun,
) -> Dict[str, str]:
await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
await run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
raise ValueError(f"unknown format from LLM: {llm_output}")
return {self.output_key: answer}
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_run_manager.on_text(inputs[self.input_key])
llm_output = self.llm_chain.predict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return self._process_llm_result(llm_output, _run_manager)
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
await _run_manager.on_text(inputs[self.input_key])
llm_output = await self.llm_chain.apredict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return await self._aprocess_llm_result(llm_output, _run_manager)
@property
def _chain_type(self) -> str:
return "llm_math_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMMathChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
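Example usage (a minimal sketch):
.. code-block:: python

    from langchain import LLMMathChain, OpenAI

    llm_math = LLMMathChain.from_llm(OpenAI(temperature=0))
    # The LLM replies with a ```text ...``` block holding a numexpr
    # expression, which is evaluated locally: "37593 * 67" -> "2518731".
    llm_math.run("What is 37593 times 67?")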
Source code for langchain.chains.retrieval_qa.base
"""Chain for question-answering against a vector database."""
from __future__ import annotations
import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
class BaseRetrievalQA(Chain):
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine the documents."""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_source_documents: bool = False
"""Return the source documents."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
allow_population_by_field_name = True
@property
def input_keys(self) -> List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys = _output_keys + ["source_documents"]
return _output_keys
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Initialize from LLM."""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(llm=llm, prompt=_prompt)
document_prompt = PromptTemplate(
input_variables=["page_content"], template="Context:\n{page_content}"
)
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name="context",
document_prompt=document_prompt,
)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Load chain from chain type."""
_chain_type_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_chain(
llm, chain_type=chain_type, **_chain_type_kwargs
)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@abstractmethod
def _get_docs(self, question: str) -> List[Document]:
"""Get documents to do question answering over."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
docs = self._get_docs(question)
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
@abstractmethod
async def _aget_docs(self, question: str) -> List[Document]:
"""Get documents to do question answering over."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
docs = await self._aget_docs(question)
answer = await self.combine_documents_chain.arun(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
class RetrievalQA(BaseRetrievalQA):
"""Chain for question-answering against an index.
Example:
.. code-block:: python
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.vectorstores.base import VectorStoreRetriever
retriever = VectorStoreRetriever(vectorstore=FAISS(...))
retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)
"""
retriever: BaseRetriever = Field(exclude=True)
def _get_docs(self, question: str) -> List[Document]:
return self.retriever.get_relevant_documents(question)
async def _aget_docs(self, question: str) -> List[Document]:
return await self.retriever.aget_relevant_documents(question)
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa"
class VectorDBQA(BaseRetrievalQA):
"""Chain for question-answering against a vector database."""
vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
"""Vector Database to connect to."""
k: int = 4
"""Number of documents to query for."""
search_type: str = "similarity"
"""Search type to use over vectorstore. `similarity` or `mmr`."""
search_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
warnings.warn(
"`VectorDBQA` is deprecated - "
"please use `from langchain.chains import RetrievalQA`"
)
return values
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "mmr"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def _get_docs(self, question: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
elif self.search_type == "mmr":
docs = self.vectorstore.max_marginal_relevance_search(
question, k=self.k, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_docs(self, question: str) -> List[Document]:
raise NotImplementedError("VectorDBQA does not support async")
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "vector_db_qa"
Source code for langchain.chains.conversation.base
"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List
from pydantic import Extra, Field, root_validator
from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMemory
class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.
Example:
.. code-block:: python
from langchain import ConversationChain, OpenAI
conversation = ConversationChain(llm=OpenAI())
"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
"""Default memory store."""
prompt: BasePromptTemplate = PROMPT
"""Default conversation prompt to use."""
input_key: str = "input" #: :meta private:
output_key: str = "response" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Use this since so some prompt vars come from history."""
return [self.input_key]
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
input_key = values["input_key"]
if input_key in memory_keys:
raise ValueError(
f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
prompt_variables = values["prompt"].input_variables
expected_keys = memory_keys + [input_key]
if set(expected_keys) != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but got {memory_keys} as inputs from "
f"memory, and {input_key} as the normal input key."
)
return values
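Example usage (a minimal sketch showing the default buffer memory carrying context between turns):
.. code-block:: python

    from langchain import ConversationChain, OpenAI

    conversation = ConversationChain(llm=OpenAI(temperature=0))
    conversation.predict(input="Hi, my name is Ada.")
    # The ConversationBufferMemory replays the first exchange in the prompt,
    # so the model can recall the name in the second turn.
    conversation.predict(input="What is my name?")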
Source code for langchain.chains.api.base
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.requests import TextRequestsWrapper
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question."""
api_request_chain: LLMChain
api_answer_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(exclude=True)
api_docs: str
question_key: str = "question" #: :meta private:
output_key: str = "output" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.question_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
@root_validator(pre=True)
def validate_api_request_prompt(cls, values: Dict) -> Dict:
"""Check that api request prompt expects the right variables."""
input_vars = values["api_request_chain"].prompt.input_variables
expected_vars = {"question", "api_docs"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
"""Check that api answer prompt expects the right variables."""
input_vars = values["api_answer_chain"].prompt.input_variables
expected_vars = {"question", "api_docs", "api_url", "api_response"}
if set(input_vars) != expected_vars:
raise ValueError(
f"Input variables should be {expected_vars}, got {input_vars}"
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = self.api_request_chain.predict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
_run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
api_url = api_url.strip()
api_response = self.requests_wrapper.get(api_url)
_run_manager.on_text(
api_response, color="yellow", end="\n", verbose=self.verbose
)
answer = self.api_answer_chain.predict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.question_key]
api_url = await self.api_request_chain.apredict(
question=question,
api_docs=self.api_docs,
callbacks=_run_manager.get_child(),
)
await _run_manager.on_text(
api_url, color="green", end="\n", verbose=self.verbose
)
api_url = api_url.strip()
api_response = await self.requests_wrapper.aget(api_url)
await _run_manager.on_text(
api_response, color="yellow", end="\n", verbose=self.verbose
)
answer = await self.api_answer_chain.apredict(
question=question,
api_docs=self.api_docs,
api_url=api_url,
api_response=api_response,
callbacks=_run_manager.get_child(),
)
return {self.output_key: answer}
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=requests_wrapper,
api_docs=api_docs,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "api_chain"
Source code for langchain.chains.api.openapi.chain
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
import json
from typing import Any, Dict, List, NamedTuple, Optional, cast
from pydantic import BaseModel, Field
from requests import Response
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains.api.openapi.requests_chain import APIRequesterChain
from langchain.chains.api.openapi.response_chain import APIResponderChain
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.requests import Requests
from langchain.tools.openapi.utils.api_models import APIOperation
class _ParamMapping(NamedTuple):
"""Mapping from parameter name to parameter value."""
query_params: List[str]
body_params: List[str]
path_params: List[str]
class OpenAPIEndpointChain(Chain, BaseModel):
"""Chain that interacts with an OpenAPI endpoint using natural language."""
api_request_chain: LLMChain
api_response_chain: Optional[LLMChain]
api_operation: APIOperation
requests: Requests = Field(exclude=True, default_factory=Requests)
param_mapping: _ParamMapping = Field(alias="param_mapping")
return_intermediate_steps: bool = False
instructions_key: str = "instructions" #: :meta private:
output_key: str = "output" #: :meta private:
max_text_length: Optional[int] = Field(ge=0) #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.instructions_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _construct_path(self, args: Dict[str, str]) -> str:
"""Construct the path from the deserialized input."""
path = self.api_operation.base_url + self.api_operation.path
for param in self.param_mapping.path_params:
path = path.replace(f"{{{param}}}", str(args.pop(param, "")))
return path
def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]:
"""Extract the query params from the deserialized input."""
query_params = {}
for param in self.param_mapping.query_params:
if param in args:
query_params[param] = args.pop(param)
return query_params
def _extract_body_params(self, args: Dict[str, str]) -> Optional[Dict[str, str]]:
"""Extract the request body params from the deserialized input."""
body_params = None
if self.param_mapping.body_params:
body_params = {}
for param in self.param_mapping.body_params:
if param in args:
body_params[param] = args.pop(param)
return body_params
[docs] def deserialize_json_input(self, serialized_args: str) -> dict:
"""Use the serialized typescript dictionary.
Resolve the path, query params dict, and optional requestBody dict.
"""
args: dict = json.loads(serialized_args)
path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {
"url": path,
"data": body_params,
"params": query_params,
}
def _get_output(self, output: str, intermediate_steps: dict) -> dict:
"""Return the output from the API call."""
if self.return_intermediate_steps:
return {
self.output_key: output,
"intermediate_steps": intermediate_steps,
}
else:
return {self.output_key: output}
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
intermediate_steps = {}
instructions = inputs[self.instructions_key]
instructions = instructions[: self.max_text_length]
_api_arguments = self.api_request_chain.predict_and_parse(
instructions=instructions, callbacks=_run_manager.get_child()
)
api_arguments = cast(str, _api_arguments)
intermediate_steps["request_args"] = api_arguments
_run_manager.on_text(
api_arguments, color="green", end="\n", verbose=self.verbose
)
if api_arguments.startswith("ERROR"):
return self._get_output(api_arguments, intermediate_steps)
elif api_arguments.startswith("MESSAGE:"):
return self._get_output(
api_arguments[len("MESSAGE:") :], intermediate_steps
)
try:
request_args = self.deserialize_json_input(api_arguments)
method = getattr(self.requests, self.api_operation.method.value)
api_response: Response = method(**request_args)
if api_response.status_code != 200:
method_str = str(self.api_operation.method.value)
response_text = (
f"{api_response.status_code}: {api_response.reason}"
+ f"\nFor {method_str.upper()} {request_args['url']}\n"
+ f"Called with args: {request_args['params']}"
)
else:
response_text = api_response.text
except Exception as e:
response_text = f"Error with message {str(e)}"
response_text = response_text[: self.max_text_length]
intermediate_steps["response_text"] = response_text
_run_manager.on_text(
response_text, color="blue", end="\n", verbose=self.verbose
)
if self.api_response_chain is not None:
_answer = self.api_response_chain.predict_and_parse(
response=response_text,
instructions=instructions,
callbacks=_run_manager.get_child(),
)
answer = cast(str, _answer)
_run_manager.on_text(answer, color="yellow", end="\n", verbose=self.verbose)
return self._get_output(answer, intermediate_steps)
else:
return self._get_output(response_text, intermediate_steps)
[docs] @classmethod
def from_url_and_method(
cls,
spec_url: str,
path: str,
method: str,
llm: BaseLanguageModel,
requests: Optional[Requests] = None,
return_intermediate_steps: bool = False,
**kwargs: Any
# TODO: Handle async
) -> "OpenAPIEndpointChain":
"""Create an OpenAPIEndpoint from a spec at the specified url."""
operation = APIOperation.from_openapi_url(spec_url, path, method)
return cls.from_api_operation(
operation,
requests=requests,
llm=llm,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
[docs] @classmethod
def from_api_operation(
cls,
operation: APIOperation,
llm: BaseLanguageModel,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
raw_response: bool = False,
callbacks: Callbacks = None,
**kwargs: Any
# TODO: Handle async
) -> "OpenAPIEndpointChain":
"""Create an OpenAPIEndpointChain from an operation and a spec."""
param_mapping = _ParamMapping(
query_params=operation.query_params,
body_params=operation.body_params,
path_params=operation.path_params,
)
requests_chain = APIRequesterChain.from_llm_and_typescript(
llm,
typescript_definition=operation.to_typescript(),
verbose=verbose,
callbacks=callbacks,
)
if raw_response:
response_chain = None
else:
response_chain = APIResponderChain.from_llm(
llm, verbose=verbose, callbacks=callbacks
)
_requests = requests or Requests()
return cls(
api_request_chain=requests_chain,
api_response_chain=response_chain,
api_operation=operation,
requests=_requests,
param_mapping=param_mapping,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
callbacks=callbacks,
**kwargs,
)
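A usage sketch for the constructors above; the spec URL, path, and instructions are placeholders for a real OpenAPI spec:
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain.llms import OpenAI

# Placeholder spec; any OpenAPI JSON/YAML URL with a GET operation works.
chain = OpenAPIEndpointChain.from_url_and_method(
    spec_url="https://example.com/openapi.json",
    path="/pets/{petId}",
    method="get",
    llm=OpenAI(temperature=0),
    return_intermediate_steps=True,
)
result = chain({"instructions": "Fetch the pet with id 42"})
print(result["output"])
print(result["intermediate_steps"])  # {"request_args": ..., "response_text": ...}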
Source code for langchain.retrievers.chatgpt_plugin_retriever
from __future__ import annotations
from typing import List, Optional
import aiohttp
import requests
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class ChatGPTPluginRetriever(BaseRetriever, BaseModel):
url: str
bearer_token: str
top_k: int = 3
filter: Optional[dict] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
response = requests.post(url, json=json, headers=headers)
results = response.json()["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
metadata = d.pop("metadata", d)
if metadata.get("source_id"):
metadata["source"] = metadata.pop("source_id")
docs.append(Document(page_content=content, metadata=metadata))
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers, json=json) as response:
res = await response.json()
else:
async with self.aiosession.post(
url, headers=headers, json=json
) as response:
res = await response.json()
results = res["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
metadata = d.pop("metadata", d)
if metadata.get("source_id"):
metadata["source"] = metadata.pop("source_id")
docs.append(Document(page_content=content, metadata=metadata))
return docs
def _create_request(self, query: str) -> tuple[str, dict, dict]:
url = f"{self.url}/query"
json = {
"queries": [
{
"query": query,
"filter": self.filter,
"top_k": self.top_k,
}
]
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.bearer_token}",
}
return url, json, headers
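Example usage (illustrative; the URL and bearer token below are placeholders for a running ChatGPT retrieval-plugin server):
from langchain.retrievers.chatgpt_plugin_retriever import ChatGPTPluginRetriever

retriever = ChatGPTPluginRetriever(
    url="http://localhost:8000",
    bearer_token="YOUR_BEARER_TOKEN",
    top_k=5,
)
docs = retriever.get_relevant_documents("alice's phone number")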
Source code for langchain.retrievers.elastic_search_bm25
"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]class ElasticSearchBM25Retriever(BaseRetriever):
"""Wrapper around Elasticsearch using BM25 as a retrieval method.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticSearchBM25Retriever.create method as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
def __init__(self, client: Any, index_name: str):
self.client = client
self.index_name = index_name
[docs] @classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(es, index_name)
[docs] def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriver.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
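Example usage (illustrative; the URL and index name are placeholders and require a reachable Elasticsearch cluster plus the `elasticsearch` package):
from langchain.retrievers.elastic_search_bm25 import ElasticSearchBM25Retriever

retriever = ElasticSearchBM25Retriever.create(
    elasticsearch_url="http://localhost:9200",
    index_name="langchain-bm25-demo",
)
retriever.add_texts(["foo", "bar", "foo bar"])
docs = retriever.get_relevant_documents("foo")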
Source code for langchain.retrievers.tfidf
"""TF-IDF Retriever.
Largely based on
https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: List[Document]
tfidf_array: Any
k: int = 4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
try:
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError:
raise ImportError(
"Could not import scikit-learn, please install with `pip install "
"scikit-learn`."
)
tfidf_params = tfidf_params or {}
vectorizer = TfidfVectorizer(**tfidf_params)
tfidf_array = vectorizer.fit_transform(texts)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn.metrics.pairwise import cosine_similarity
query_vec = self.vectorizer.transform(
[query]
) # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
results = cosine_similarity(self.tfidf_array, query_vec).reshape(
(-1,)
) # Op -- (n_docs,1) -- Cosine Sim with each doc
return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
return return_docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
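Example usage (a minimal sketch; the sample texts and ngram_range are arbitrary):
from langchain.retrievers.tfidf import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(
    ["foo", "bar", "world", "hello", "foo bar"],
    tfidf_params={"ngram_range": (1, 2)},  # forwarded to sklearn's TfidfVectorizer
)
docs = retriever.get_relevant_documents("foo")  # top-k texts by cosine similarity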
Source code for langchain.retrievers.zep
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from zep_python import MemorySearchResult
[docs]class ZepRetriever(BaseRetriever):
"""A Retriever implementation for the Zep long-term memory store. Search your
user's long-term chat history with Zep.
Note: You will need to provide the user's `session_id` to use this retriever.
More on Zep:
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions, see:
https://getzep.github.io/deployment/quickstart/
"""
def __init__(
self,
session_id: str,
url: str,
top_k: Optional[int] = None,
):
try:
from zep_python import ZepClient
except ImportError:
raise ValueError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url)
self.session_id = session_id
self.top_k = top_k
def _search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
[docs] def get_relevant_documents(
self, query: str, metadata: Optional[Dict] = None
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = self.zep_client.search_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
[docs] async def aget_relevant_documents(
self, query: str, metadata: Optional[Dict] = None
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = await self.zep_client.asearch_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
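Example usage (illustrative; the session id and server URL are placeholders for a running Zep instance):
from langchain.retrievers.zep import ZepRetriever

retriever = ZepRetriever(
    session_id="user-123-session",
    url="http://localhost:8000",
    top_k=5,
)
docs = retriever.get_relevant_documents("travel plans")
# Each Document carries the match distance in metadata["score"].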
Source code for langchain.retrievers.remote_retriever
from typing import List, Optional
import aiohttp
import requests
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class RemoteLangChainRetriever(BaseRetriever, BaseModel):
url: str
headers: Optional[dict] = None
input_key: str = "message"
response_key: str = "response"
page_content_key: str = "page_content"
metadata_key: str = "metadata"
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
response = requests.post(
self.url, json={self.input_key: query}, headers=self.headers
)
result = response.json()
return [
Document(
page_content=r[self.page_content_key], metadata=r[self.metadata_key]
)
for r in result[self.response_key]
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST", self.url, headers=self.headers, json={self.input_key: query}
) as response:
result = await response.json()
return [
Document(
page_content=r[self.page_content_key], metadata=r[self.metadata_key]
)
for r in result[self.response_key]
]
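Example usage (illustrative; the endpoint is a placeholder and must implement the request/response contract implied by the keys above):
from langchain.retrievers.remote_retriever import RemoteLangChainRetriever

# The server must accept {"message": <query>} and reply with
# {"response": [{"page_content": ..., "metadata": {...}}, ...]}.
retriever = RemoteLangChainRetriever(url="http://localhost:8080/retrieve")
docs = retriever.get_relevant_documents("my query")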
Source code for langchain.retrievers.aws_kendra_index_retriever
"""Retriever wrapper for AWS Kendra."""
import re
from typing import Any, Dict, List
from langchain.schema import BaseRetriever, Document
[docs]class AwsKendraIndexRetriever(BaseRetriever):
"""Wrapper around AWS Kendra."""
kendraindex: str
"""Kendra index id"""
k: int
"""Number of documents to query for."""
languagecode: str
"""Languagecode used for querying."""
kclient: Any
""" boto3 client for Kendra. """
def __init__(
self, kclient: Any, kendraindex: str, k: int = 3, languagecode: str = "en"
):
self.kendraindex = kendraindex
self.k = k
self.languagecode = languagecode
self.kclient = kclient
def _clean_result(self, res_text: str) -> str:
return re.sub(r"\s+", " ", res_text).replace("...", "")  # raw string avoids an invalid-escape warning
def _get_top_n_results(self, resp: Dict, count: int) -> Document:
r = resp["ResultItems"][count]
doc_title = r["DocumentTitle"]["Text"]
doc_uri = r["DocumentURI"]
r_type = r["Type"]
if (
r["AdditionalAttributes"]
and r["AdditionalAttributes"][0]["Key"] == "AnswerText"
):
res_text = r["AdditionalAttributes"][0]["Value"]["TextWithHighlightsValue"][
"Text"
]
else:
res_text = r["DocumentExcerpt"]["Text"]
doc_excerpt = self._clean_result(res_text)
combined_text = f"""Document Title: {doc_title}
Document Excerpt: {doc_excerpt}
"""
return Document(
page_content=combined_text,
metadata={
"source": doc_uri,
"title": doc_title,
"excerpt": doc_excerpt,
"type": r_type,
},
)
def _kendra_query(self, kquery: str) -> List[Document]:
response = self.kclient.query(
IndexId=self.kendraindex,
QueryText=kquery.strip(),
AttributeFilter={
"AndAllFilters": [
{
"EqualsTo": {
"Key": "_language_code",
"Value": {
"StringValue": self.languagecode,
},
}
}
]
},
)
if len(response["ResultItems"]) > self.k:
r_count = self.k
else:
r_count = len(response["ResultItems"])
return [self._get_top_n_results(response, i) for i in range(0, r_count)]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Run search on Kendra index and get top k documents
docs = get_relevant_documents('This is my query')
"""
return self._kendra_query(query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("AwsKendraIndexRetriever does not support async")
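Example usage (illustrative; the region and index id are placeholders, and valid AWS credentials are required):
import boto3

from langchain.retrievers.aws_kendra_index_retriever import AwsKendraIndexRetriever

kclient = boto3.client("kendra", region_name="us-east-1")
retriever = AwsKendraIndexRetriever(
    kclient=kclient,
    kendraindex="11111111-2222-3333-4444-555555555555",
    k=3,
)
docs = retriever.get_relevant_documents("How do I reset my password?")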
Source code for langchain.retrievers.svm
"""SMV Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class SVMRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> SVMRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn import svm
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(
class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
# svm.LinearSVC in scikit-learn is non-deterministic.
# if a text is the same as a query, there is no guarantee
# the query will be in the first index.
# this performs a simple swap, this works because anything
# left of the 0 should be equivalent.
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1 : self.k + 1]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row - 1]))
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
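Example usage (a minimal sketch; OpenAIEmbeddings is one possible Embeddings implementation and needs an API key):
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.svm import SVMRetriever

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world", "hello"],
    OpenAIEmbeddings(),
    k=2,
)
docs = retriever.get_relevant_documents("foo")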
Source code for langchain.retrievers.contextual_compression
"""Retriever that wraps a base retriever and filters the results."""
from typing import List
from pydantic import BaseModel, Extra
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import BaseRetriever, Document
[docs]class ContextualCompressionRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: BaseRetriever
"""Base Retriever to use for getting relevant documents."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.get_relevant_documents(query)
compressed_docs = self.base_compressor.compress_documents(docs, query)
return list(compressed_docs)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.aget_relevant_documents(query)
compressed_docs = await self.base_compressor.acompress_documents(docs, query)
return list(compressed_docs)
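Example usage (illustrative; FAISS and LLMChainExtractor are one choice of base retriever and compressor, requiring `faiss-cpu` and an OpenAI API key):
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.vectorstores import FAISS

base_retriever = FAISS.from_texts(
    ["foo is a metasyntactic variable", "bar is unrelated"], OpenAIEmbeddings()
).as_retriever()
compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)
docs = retriever.get_relevant_documents("what is foo?")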
Source code for langchain.retrievers.time_weighted_retriever
"""Retriever that combines embedding similarity with recency in retrieving values."""
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetime objects."""
return (time - ref_time).total_seconds() / 3600
[docs]class TimeWeightedVectorStoreRetriever(BaseRetriever, BaseModel):
"""Retriever combining embedding similarity with recency."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
[docs] def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
raise NotImplementedError
[docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
[docs] async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
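Example usage (illustrative; the FAISS-backed store, 1536-dimension index, and decay rate are assumptions, with 1536 matching OpenAI's text-embedding-ada-002 vectors):
import faiss

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.time_weighted_retriever import (
    TimeWeightedVectorStoreRetriever,
)
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=1
)
retriever.add_documents([Document(page_content="hello world")])
# Recently accessed documents score higher: (1 - 0.01) ** hours_passed.
docs = retriever.get_relevant_documents("hello world")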
Source code for langchain.retrievers.knn
"""KNN Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class KNNRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> KNNRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_embeds = np.array(self.embeddings.embed_query(query))
# L2-normalize index and query embeddings so the dot product is cosine similarity
index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
similarities = index_embeds.dot(query_embeds)
sorted_ix = np.argsort(-similarities)
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = [
Document(page_content=self.texts[row])
for row in sorted_ix[0 : self.k]
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
)
]
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
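Example usage (a minimal sketch; any Embeddings implementation works in place of OpenAIEmbeddings):
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.knn import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world", "hello"],
    OpenAIEmbeddings(),
    k=2,
)
docs = retriever.get_relevant_documents("foo")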
Source code for langchain.retrievers.azure_cognitive_search
"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
[docs]class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
"""Wrapper around Azure Cognitive Search."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
return base_url + endpoint_path + f"&search={query}"
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
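Example usage (illustrative; the service, index, and key values are placeholders, set here via the env vars the validator reads):
import os

from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever

os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "my-search-service"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "my-index"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "YOUR_QUERY_KEY"

retriever = AzureCognitiveSearchRetriever(content_key="content")
docs = retriever.get_relevant_documents("what is langchain")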
Source code for langchain.retrievers.weaviate_hybrid_search
"""Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from uuid import uuid4
from pydantic import Extra
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]class WeaviateHybridSearchRetriever(BaseRetriever):
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
alpha: float = 0.5,
k: int = 4,
attributes: Optional[List[str]] = None,
create_schema_if_missing: bool = True,
):
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self.k = k
self.alpha = alpha
self._index_name = index_name
self._text_key = text_key
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
if create_schema_if_missing:
self._create_schema_if_missing()
def _create_schema_if_missing(self) -> None:
class_obj = {
"class": self._index_name,
"properties": [{"name": self._text_key, "dataType": ["text"]}],
"vectorizer": "text2vec-openai",
}
if not self._client.schema.exists(self._index_name):
self._client.schema.create_class(class_obj)
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
# added text_key
[docs] def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self._text_key: doc.page_content, **metadata}
# If the UUID of one of the objects already exists,
# the existing object will be replaced by the new one.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
[docs] def get_relevant_documents(
self, query: str, where_filter: Optional[Dict[str, object]] = None
) -> List[Document]:
"""Look up similar documents in Weaviate."""
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if where_filter:
query_obj = query_obj.with_where(where_filter)
result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
[docs] async def aget_relevant_documents(
self, query: str, where_filter: Optional[Dict[str, object]] = None
) -> List[Document]:
raise NotImplementedError
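Example usage (illustrative; the URL is a placeholder for a running Weaviate instance configured with the text2vec-openai vectorizer):
import weaviate

from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
from langchain.schema import Document

client = weaviate.Client(url="http://localhost:8080")
retriever = WeaviateHybridSearchRetriever(
    client=client, index_name="LangChain", text_key="text"
)
retriever.add_documents([Document(page_content="foo")])
docs = retriever.get_relevant_documents("foo")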
Source code for langchain.retrievers.pinecone_hybrid_search
"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def hash_text(text: str) -> str:
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
[docs]class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
sparse_encoder: Any
index: Any
top_k: int = 4
alpha: float = 0.5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as Documents
return final_result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
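Example usage (illustrative; keys, environment, and index name are placeholders, and the Pinecone index should use the dotproduct metric so sparse and dense scores can be combined):
import pinecone
from pinecone_text.sparse import BM25Encoder

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.pinecone_hybrid_search import PineconeHybridSearchRetriever

pinecone.init(api_key="YOUR_API_KEY", environment="us-east-1-aws")
index = pinecone.Index("hybrid-demo")
retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),  # pre-fitted BM25 parameters
    index=index,
)
retriever.add_texts(["foo", "bar", "world"])
docs = retriever.get_relevant_documents("foo")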
Source code for langchain.retrievers.vespa_retriever
"""Wrapper for retrieving documents from Vespa."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from vespa.application import Vespa
[docs]class VespaRetriever(BaseRetriever):
def __init__(
self,
app: Vespa,
body: Dict,
content_field: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self._application = app
self._query_body = body
self._content_field = content_field
self._metadata_fields = metadata_fields or ()
def _query(self, body: Dict) -> List[Document]:
response = self._application.query(body)
if not str(response.status_code).startswith("2"):
raise RuntimeError(
"Could not retrieve data from Vespa. Error code: {}".format(
response.status_code
)
)
root = response.json["root"]
if "errors" in root:
raise RuntimeError(json.dumps(root["errors"]))
docs = []
for child in response.hits:
page_content = child["fields"].pop(self._content_field, "")
if self._metadata_fields == "*":
metadata = child["fields"]
else:
metadata = {mf: child["fields"].get(mf) for mf in self._metadata_fields}
metadata["id"] = child["id"]
docs.append(Document(page_content=page_content, metadata=metadata))
return docs
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
body = self._query_body.copy()
body["query"] = query
return self._query(body)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] def get_relevant_documents_with_filter(
self, query: str, *, _filter: Optional[str] = None
) -> List[Document]:
body = self._query_body.copy()
_filter = f" and {_filter}" if _filter else ""
body["yql"] = body["yql"] + _filter
body["query"] = query
return self._query(body)
[docs] @classmethod
def from_params(
cls,
url: str,
content_field: str,
*,
k: Optional[int] = None,
metadata_fields: Union[Sequence[str], Literal["*"]] = (),
sources: Union[Sequence[str], Literal["*"], None] = None,
_filter: Optional[str] = None,
yql: Optional[str] = None,
**kwargs: Any,
) -> VespaRetriever:
"""Instantiate retriever from params.
Args:
url (str): Vespa app URL.
content_field (str): Field in results to return as Document page_content.
k (Optional[int]): Number of Documents to return. Defaults to None.
metadata_fields(Sequence[str] or "*"): Fields in results to include in
document metadata. Defaults to empty tuple ().
sources (Sequence[str] or "*" or None): Sources to retrieve
from. Defaults to None.
_filter (Optional[str]): Document filter condition expressed in YQL.
Defaults to None.
yql (Optional[str]): Full YQL query to be used. Should not be specified
if _filter or sources are specified. Defaults to None.
kwargs (Any): Keyword arguments added to query body.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
"pyvespa is not installed, please install with `pip install pyvespa`"
)
app = Vespa(url)
body = kwargs.copy()
if yql and (sources or _filter):
raise ValueError(
"yql should only be specified if both sources and _filter are not "
"specified."
)
else:
if metadata_fields == "*":
_fields = "*"
body["summary"] = "short"
else:
_fields = ", ".join([content_field] + list(metadata_fields or []))
_sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
_filter = f" and {_filter}" if _filter else ""
yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
body["yql"] = yql
if k:
body["hits"] = k
return cls(app, body, content_field, metadata_fields=metadata_fields)
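Example usage of from_params (illustrative; the URL, source, and field names are placeholders for a running Vespa application):
from langchain.retrievers.vespa_retriever import VespaRetriever

retriever = VespaRetriever.from_params(
    "http://localhost:8080",
    "content",
    k=5,
    metadata_fields=("title",),
    sources=("doc",),  # pass a sequence; a bare string would be joined char-wise
)
docs = retriever.get_relevant_documents("what is vespa?")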
Source code for langchain.retrievers.pupmed
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.pupmed import PubMedAPIWrapper
[docs]class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
"""
It is effectively a wrapper for PubMedAPIWrapper.
It exposes load_docs() as get_relevant_documents().
It uses all PubMedAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load_docs(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
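Example usage (a minimal sketch; note the upstream module path really is spelled "pupmed"):
from langchain.retrievers.pupmed import PubMedRetriever

retriever = PubMedRetriever()
docs = retriever.get_relevant_documents("chatgpt")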
Source code for langchain.retrievers.arxiv
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.arxiv import ArxivAPIWrapper
[docs]class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
"""
It is effectively a wrapper for ArxivAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all ArxivAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
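Example usage (a minimal sketch; load_max_docs is an ArxivAPIWrapper field not shown on this page):
from langchain.retrievers.arxiv import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)
docs = retriever.get_relevant_documents("quantum computing")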
Source code for langchain.retrievers.databerry
from typing import List, Optional
import aiohttp
import requests
from langchain.schema import BaseRetriever, Document
[docs]class DataberryRetriever(BaseRetriever):
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def __init__(
self,
datastore_url: str,
top_k: Optional[int] = None,
api_key: Optional[str] = None,
):
self.datastore_url = datastore_url
self.api_key = api_key
self.top_k = top_k
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
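Example usage (illustrative; the datastore URL and API key are placeholders for a Databerry datastore endpoint):
from langchain.retrievers.databerry import DataberryRetriever

retriever = DataberryRetriever(
    datastore_url="https://api.databerry.ai/query/YOUR_DATASTORE_ID",
    top_k=3,
    api_key="YOUR_API_KEY",
)
docs = retriever.get_relevant_documents("What is Databerry?")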
Source code for langchain.retrievers.wikipedia
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper
[docs]class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
"""
It is effectively a wrapper for WikipediaAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
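Example usage (a minimal sketch with default WikipediaAPIWrapper settings):
from langchain.retrievers.wikipedia import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.get_relevant_documents("HUNTER X HUNTER")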
Source code for langchain.retrievers.merger_retriever
from typing import List
from langchain.schema import BaseRetriever, Document
[docs]class MergerRetriever(BaseRetriever):
"""
This class merges the results of multiple retrievers.
Args:
retrievers: A list of retrievers to merge.
"""
def __init__(
self,
retrievers: List[BaseRetriever],
):
"""
Initialize the MergerRetriever class.
Args:
retrievers: A list of retrievers to merge.
"""
self.retrievers = retrievers
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = self.merge_documents(query)
return merged_documents
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = await self.amerge_documents(query)
return merged_documents
[docs] def merge_documents(self, query: str) -> List[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.get_relevant_documents(query) for retriever in self.retrievers
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(len(docs) for docs in retriever_docs)
for i in range(max_docs):
for docs in retriever_docs:
if i < len(docs):
merged_documents.append(docs[i])
return merged_documents
[docs] async def amerge_documents(self, query: str) -> List[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
await retriever.aget_relevant_documents(query)
for retriever in self.retrievers
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(len(docs) for docs in retriever_docs)
for i in range(max_docs):
for docs in retriever_docs:
if i < len(docs):
merged_documents.append(docs[i])
return merged_documents
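Example usage (illustrative; two FAISS stores stand in for any pair of retrievers, requiring `faiss-cpu` and an OpenAI API key):
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
r1 = FAISS.from_texts(["foo", "bar"], embeddings).as_retriever()
r2 = FAISS.from_texts(["baz", "qux"], embeddings).as_retriever()
retriever = MergerRetriever(retrievers=[r1, r2])
# Results interleave round-robin: r1[0], r2[0], r1[1], r2[1], ...
docs = retriever.get_relevant_documents("foo")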
Source code for langchain.retrievers.metal
from typing import Any, List, Optional
from langchain.schema import BaseRetriever, Document
[docs]class MetalRetriever(BaseRetriever):
def __init__(self, client: Any, params: Optional[dict] = None):
        # Imported inside __init__ so metal_sdk stays an optional dependency.
        from metal_sdk.metal import Metal
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
self.client: Metal = client
self.params = params or {}
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
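A minimal sketch; the API key, client ID, and index ID below are placeholders
for real Metal credentials:

    from metal_sdk.metal import Metal
    from langchain.retrievers import MetalRetriever

    metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")  # placeholder credentials
    retriever = MetalRetriever(metal, params={"limit": 2})
    docs = retriever.get_relevant_documents("What is the meaning of life?")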
Source code for langchain.retrievers.self_query.base
"""Retriever that generates and executes structured queries over its own data source."""
from typing import Any, Dict, List, Optional, Type, cast
from pydantic import BaseModel, Field, root_validator
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.chains.query_constructor.base import load_query_constructor_chain
from langchain.chains.query_constructor.ir import StructuredQuery, Visitor
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.chroma import ChromaTranslator
from langchain.retrievers.self_query.pinecone import PineconeTranslator
from langchain.retrievers.self_query.qdrant import QdrantTranslator
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Chroma, Pinecone, Qdrant, VectorStore, Weaviate
def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
"""Get the translator class corresponding to the vector store class."""
vectorstore_cls = vectorstore.__class__
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {
Pinecone: PineconeTranslator,
Chroma: ChromaTranslator,
Weaviate: WeaviateTranslator,
Qdrant: QdrantTranslator,
}
if vectorstore_cls not in BUILTIN_TRANSLATORS:
raise ValueError(
f"Self query retriever with Vector Store type {vectorstore_cls}"
f" not supported."
)
if isinstance(vectorstore, Qdrant):
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
return BUILTIN_TRANSLATORS[vectorstore_cls]()
[docs]class SelfQueryRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps around a vector store and uses an LLM to generate
the vector store queries."""
vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved."""
llm_chain: LLMChain
"""The LLMChain for generating the vector store queries."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass in to the vector store search."""
structured_query_translator: Visitor
"""Translator for turning internal query language into vectorstore search params."""
verbose: bool = False
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_translator(cls, values: Dict) -> Dict:
"""Validate translator."""
if "structured_query_translator" not in values:
values["structured_query_translator"] = _get_builtin_translator(
values["vectorstore"]
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
inputs = self.llm_chain.prep_inputs({"query": query})
structured_query = cast(
StructuredQuery, self.llm_chain.predict_and_parse(callbacks=None, **inputs)
)
if self.verbose:
print(structured_query)
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
structured_query
)
if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit
search_kwargs = {**self.search_kwargs, **new_kwargs}
docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
document_contents: str,
metadata_field_info: List[AttributeInfo],
structured_query_translator: Optional[Visitor] = None,
chain_kwargs: Optional[Dict] = None,
enable_limit: bool = False,
**kwargs: Any,
) -> "SelfQueryRetriever":
if structured_query_translator is None:
structured_query_translator = _get_builtin_translator(vectorstore)
chain_kwargs = chain_kwargs or {}
if "allowed_comparators" not in chain_kwargs:
chain_kwargs[
"allowed_comparators"
] = structured_query_translator.allowed_comparators
if "allowed_operators" not in chain_kwargs:
chain_kwargs[
"allowed_operators"
] = structured_query_translator.allowed_operators
llm_chain = load_query_constructor_chain(
llm,
document_contents,
metadata_field_info,
enable_limit=enable_limit,
**chain_kwargs,
)
return cls(
llm_chain=llm_chain,
vectorstore=vectorstore,
structured_query_translator=structured_query_translator,
**kwargs,
)
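A usage sketch; vectorstore below is a placeholder for an initialized Chroma,
Pinecone, Qdrant, or Weaviate store, and the document description and
metadata field are illustrative:

    from langchain import OpenAI
    from langchain.chains.query_constructor.schema import AttributeInfo
    from langchain.retrievers.self_query.base import SelfQueryRetriever

    metadata_field_info = [
        AttributeInfo(
            name="year",
            description="The year the movie was released",
            type="integer",
        ),
    ]
    retriever = SelfQueryRetriever.from_llm(
        llm=OpenAI(temperature=0),
        vectorstore=vectorstore,  # placeholder: any supported vector store
        document_contents="Brief summary of a movie",
        metadata_field_info=metadata_field_info,
    )
    # The LLM translates the natural-language filter into structured
    # search kwargs before the vector store is queried.
    docs = retriever.get_relevant_documents("movies released after 2000")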
Source code for langchain.retrievers.document_compressors.base
"""Interface for retrieved document compressors."""
from abc import ABC, abstractmethod
from typing import List, Sequence, Union
from pydantic import BaseModel
from langchain.schema import BaseDocumentTransformer, Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base abstraction interface for document compression."""
@abstractmethod
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
@abstractmethod
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
[docs]class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of transformers."""
transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
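A pipeline sketch combining a plain transformer with embedding-based filters,
assuming an OpenAI API key is configured and that docs holds previously
retrieved documents:

    from langchain.document_transformers import EmbeddingsRedundantFilter
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.document_compressors import (
        DocumentCompressorPipeline,
        EmbeddingsFilter,
    )
    from langchain.text_splitter import CharacterTextSplitter

    embeddings = OpenAIEmbeddings()
    # Split documents, drop near-duplicate chunks, then keep only
    # chunks relevant to the query.
    splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
    redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
    relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
    pipeline = DocumentCompressorPipeline(
        transformers=[splitter, redundant_filter, relevant_filter]
    )
    compressed = pipeline.compress_documents(docs, "what did the president say?")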
Source code for langchain.retrievers.document_compressors.cohere_rerank
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Sequence
from pydantic import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
    # We do this to avoid pydantic annotation issues when actually instantiating,
    # while keeping this import optional.
try:
from cohere import Client
except ImportError:
pass
[docs]class CohereRerank(BaseDocumentCompressor):
client: Client
top_n: int = 3
model: str = "rerank-english-v2.0"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
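A minimal sketch, assuming the cohere package is installed and the
COHERE_API_KEY environment variable is set (the validator builds the client
from it):

    from langchain.retrievers.document_compressors import CohereRerank
    from langchain.schema import Document

    compressor = CohereRerank(top_n=2)
    docs = [
        Document(page_content="The Eiffel Tower is in Paris."),
        Document(page_content="Bananas are yellow."),
        Document(page_content="Paris is the capital of France."),
    ]
    # Returns the two most relevant documents, each tagged with relevance_score.
    reranked = compressor.compress_documents(docs, "Where is the Eiffel Tower?")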
Source code for langchain.retrievers.document_compressors.chain_extract
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
import asyncio
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseOutputParser, Document
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
[docs]class LLMChainExtractor(BaseDocumentCompressor):
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
 [docs] def compress_documents(
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        """Compress page content of raw documents."""
        compressed_docs = []
        for doc in documents:
            _input = self.get_input(query, doc)
            # An empty string from the parser means the document had no
            # content relevant to the query, so it is dropped.
            output = self.llm_chain.predict_and_parse(**_input)
            if len(output) == 0:
                continue
            compressed_docs.append(
                Document(page_content=output, metadata=doc.metadata)
            )
        return compressed_docs
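A usage sketch built only from names defined in this module, assuming an
OpenAI API key is configured:

    from langchain import LLMChain, OpenAI
    from langchain.schema import Document

    llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=_get_default_chain_prompt())
    compressor = LLMChainExtractor(llm_chain=llm_chain)
    compressed = compressor.compress_documents(
        [Document(page_content="LangChain is a framework for LLM apps. The sky is blue.")],
        query="What is LangChain?",
    )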