f26a4f61091c-0
Source code for langchain.agents.chat.output_parser

import json
import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException

FINAL_ANSWER_ACTION = "Final Answer:"


class ChatOutputParser(AgentOutputParser):
    """Output parser for the chat agent."""

    pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
    """Regex pattern to parse the output."""

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        try:
            found = self.pattern.search(text)
            if not found:
                # Fast fail to parse Final Answer.
                raise ValueError("action not found")
            action = found.group(1)
            response = json.loads(action.strip())
            includes_action = "action" in response
            if includes_answer and includes_action:
                raise OutputParserException(
                    "Parsing LLM output produced a final answer "
                    f"and a parse-able action: {text}"
                )
            return AgentAction(
                response["action"], response.get("action_input", {}), text
            )
        except Exception:
            if not includes_answer:
                raise OutputParserException(f"Could not parse LLM output: {text}")
            output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish({"output": output}, text)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/output_parser.html
f26a4f61091c-1
    @property
    def _type(self) -> str:
        return "chat"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/output_parser.html
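The parsing contract above is easiest to see on concrete strings. A minimal sketch (the model outputs below are invented for illustration; the classes come from the source above):

from langchain.agents.chat.output_parser import ChatOutputParser

parser = ChatOutputParser()

# A tool invocation wrapped in a ```json fence, which the regex extracts
action_text = '```json\n{"action": "Search", "action_input": "weather in SF"}\n```'
parser.parse(action_text)  # -> AgentAction("Search", "weather in SF", action_text)

# Text with no fenced block but containing "Final Answer:" falls through
# to the except branch and becomes an AgentFinish
finish_text = "Thought: I know this.\nFinal Answer: It is sunny."
parser.parse(finish_text)  # -> AgentFinish({"output": "It is sunny."}, finish_text)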
2e91e2eacc57-0
Source code for langchain.agents.conversational_chat.base

"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence, Tuple

from pydantic import Field

from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain.agents.conversational_chat.prompt import (
    PREFIX,
    SUFFIX,
    TEMPLATE_TOOL_RESPONSE,
)
from langchain.agents.utils import validate_tools_single_input
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain.schema import AgentAction, BaseOutputParser, BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from langchain.tools.base import BaseTool


class ConversationalChatAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
    template_tool_response: str = TEMPLATE_TOOL_RESPONSE

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ConvoOutputParser()

    @property
    def _agent_type(self) -> str:
        raise NotImplementedError

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
2e91e2eacc57-1
"""Prefix to append the llm call with.""" return "Thought:" @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) [docs] @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, output_parser: Optional[BaseOutputParser] = None, ) -> BasePromptTemplate: tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools] ) tool_names = ", ".join([tool.name for tool in tools]) _output_parser = output_parser or cls._get_default_output_parser() format_instructions = human_message.format( format_instructions=_output_parser.get_format_instructions() ) final_prompt = format_instructions.format( tool_names=tool_names, tools=tool_strings ) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] messages = [ SystemMessagePromptTemplate.from_template(system_message), MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template(final_prompt), MessagesPlaceholder(variable_name="agent_scratchpad"), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> List[BaseMessage]: """Construct the scratchpad that lets the agent continue its thought process."""
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
2e91e2eacc57-2
"""Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage( content=self.template_tool_response.format(observation=observation) ) thoughts.append(human_message) return thoughts [docs] @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) _output_parser = output_parser or cls._get_default_output_parser() prompt = cls.create_prompt( tools, system_message=system_message, human_message=human_message, input_variables=input_variables, output_parser=_output_parser, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html
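A minimal construction sketch for this agent, assuming an OpenAI API key is configured; the tool and its lambda implementation are placeholders:

from langchain.agents import AgentExecutor, Tool
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.chat_models import ChatOpenAI

search_tool = Tool(
    name="Search",
    func=lambda q: "no results",  # stand-in implementation
    description="useful for answering questions about current events",
)

agent = ConversationalChatAgent.from_llm_and_tools(
    llm=ChatOpenAI(temperature=0), tools=[search_tool]
)
executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=[search_tool])

Because create_prompt wires in a chat_history placeholder, the executor is normally paired with a memory object that supplies that variable at run time.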
0acadc827a52-0
Source code for langchain.agents.conversational_chat.output_parser

from __future__ import annotations

from typing import Union

from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_json_markdown
from langchain.schema import AgentAction, AgentFinish, OutputParserException


# Define a class that parses output for conversational agents
class ConvoOutputParser(AgentOutputParser):
    """Output parser for the conversational agent."""

    def get_format_instructions(self) -> str:
        """Returns formatting instructions for the given output parser."""
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Attempts to parse the given text into an AgentAction or AgentFinish.

        Raises:
            OutputParserException if parsing fails.
        """
        try:
            # Attempt to parse the text into a structured format (assumed to be JSON
            # stored as markdown)
            response = parse_json_markdown(text)

            # If the response contains an 'action' and 'action_input'
            if "action" in response and "action_input" in response:
                action, action_input = response["action"], response["action_input"]

                # If the action indicates a final answer, return an AgentFinish
                if action == "Final Answer":
                    return AgentFinish({"output": action_input}, text)
                else:
                    # Otherwise, return an AgentAction with the specified action and
                    # input
                    return AgentAction(action, action_input, text)
            else:
                # If the necessary keys aren't present in the response, raise an
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/output_parser.html
0acadc827a52-1
                # exception
                raise OutputParserException(
                    f"Missing 'action' or 'action_input' in LLM output: {text}"
                )
        except Exception as e:
            # If any other exception is raised during parsing, also raise an
            # OutputParserException
            raise OutputParserException(f"Could not parse LLM output: {text}") from e

    @property
    def _type(self) -> str:
        return "conversational_chat"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/output_parser.html
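For contrast with ChatOutputParser, ConvoOutputParser routes everything through parse_json_markdown and treats the reserved action name "Final Answer" as the finish signal. A small sketch with invented model outputs:

from langchain.agents.conversational_chat.output_parser import ConvoOutputParser

parser = ConvoOutputParser()

# A tool call: parse_json_markdown accepts a fenced JSON blob
text = '```json\n{"action": "Search", "action_input": "population of Paris"}\n```'
parser.parse(text)  # -> AgentAction("Search", "population of Paris", text)

# A final answer uses the reserved action name "Final Answer"
text = '```json\n{"action": "Final Answer", "action_input": "About 2.1 million."}\n```'
parser.parse(text)  # -> AgentFinish({"output": "About 2.1 million."}, text)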
c14a5cd6965c-0
Source code for langchain.agents.mrkl.base

"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations

from typing import Any, Callable, List, NamedTuple, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool


class ChainConfig(NamedTuple):
    """Configuration for chain to use in MRKL system.

    Args:
        action_name: Name of the action.
        action: Action function to call.
        action_description: Description of the action.
    """

    action_name: str
    action: Callable
    action_description: str


class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""

    output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return MRKLOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.ZERO_SHOT_REACT_DESCRIPTION
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
c14a5cd6965c-1
    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            format_instructions: Format string for the agent's output, whose
                {tool_names} placeholder is filled in here.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join(
            [f"{tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
c14a5cd6965c-2
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        validate_tools_single_input(cls.__name__, tools)
        if len(tools) == 0:
            raise ValueError(
                f"Got no tools for {cls.__name__}. At least one tool must be provided."
            )
        for tool in tools:
            if tool.description is None:
                raise ValueError(
                    f"Got a tool {tool.name} without a description. For this agent, "
                    f"a description must always be provided."
                )
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
c14a5cd6965c-3
f"a description must always be provided." ) super()._validate_tools(tools) [docs]class MRKLChain(AgentExecutor): """Chain that implements the MRKL system. Example: .. code-block:: python from langchain import OpenAI, MRKLChain from langchain.chains.mrkl.base import ChainConfig llm = OpenAI(temperature=0) prompt = PromptTemplate(...) chains = [...] mrkl = MRKLChain.from_chains(llm=llm, prompt=prompt) """ [docs] @classmethod def from_chains( cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any ) -> AgentExecutor: """User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use as the agent LLM. chains: The chains the MRKL system has access to. **kwargs: parameters to be passed to initialization. Returns: An initialized MRKL chain. Example: .. code-block:: python from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain from langchain.chains.mrkl.base import ChainConfig llm = OpenAI(temperature=0) search = SerpAPIWrapper() llm_math_chain = LLMMathChain(llm=llm) chains = [ ChainConfig( action_name = "Search", action=search.search, action_description="useful for searching" ), ChainConfig(
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
c14a5cd6965c-4
action_description="useful for searching" ), ChainConfig( action_name="Calculator", action=llm_math_chain.run, action_description="useful for doing math" ) ] mrkl = MRKLChain.from_chains(llm, chains) """ tools = [ Tool( name=c.action_name, func=c.action, description=c.action_description, ) for c in chains ] agent = ZeroShotAgent.from_llm_and_tools(llm, tools) return cls(agent=agent, tools=tools, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
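To see what create_prompt assembles, here is a small sketch; the single calculator tool and its lambda are illustrative:

from langchain.agents import Tool
from langchain.agents.mrkl.base import ZeroShotAgent

tools = [
    Tool(name="Calculator", func=lambda q: "42", description="useful for math"),
]
prompt = ZeroShotAgent.create_prompt(tools)
# The template is PREFIX + "Calculator: useful for math" + the format
# instructions (with {tool_names} filled in) + SUFFIX, joined by blank lines.
print(prompt.input_variables)  # ['input', 'agent_scratchpad']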
0507e559e3bd-0
Source code for langchain.agents.mrkl.output_parser

import re
from typing import Union

from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException

FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
    "Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
    "Parsing LLM output produced both a final answer and a parse-able action:"
)


class MRKLOutputParser(AgentOutputParser):
    """Output parser for the MRKL agent."""

    def get_format_instructions(self) -> str:
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        )
        action_match = re.search(regex, text, re.DOTALL)
        if action_match:
            if includes_answer:
                raise OutputParserException(
                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                )
            action = action_match.group(1).strip()
            action_input = action_match.group(2)
            tool_input = action_input.strip(" ")
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/output_parser.html
0507e559e3bd-1
            # ensure if it's a well formed SQL query we don't remove any trailing " chars
            if not tool_input.startswith("SELECT "):
                tool_input = tool_input.strip('"')

            return AgentAction(action, tool_input, text)

        elif includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )

        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
            )
        else:
            raise OutputParserException(f"Could not parse LLM output: `{text}`")

    @property
    def _type(self) -> str:
        return "mrkl"
https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/output_parser.html
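Unlike the chat parsers above, the MRKL parser works on plain "Action:/Action Input:" text rather than fenced JSON. A sketch with invented LLM outputs:

from langchain.agents.mrkl.output_parser import MRKLOutputParser

parser = MRKLOutputParser()

text = (
    "Thought: I need to look this up.\n"
    "Action: Search\n"
    "Action Input: capital of France"
)
parser.parse(text)  # -> AgentAction("Search", "capital of France", text)

parser.parse("Final Answer: Paris")  # -> AgentFinish({"output": "Paris"}, ...)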
3ebaa66ac351-0
Source code for langchain.retrievers.azure_cognitive_search

"""Retriever for the Azure Cognitive Search service."""
from __future__ import annotations

import json
from typing import Dict, List, Optional

import aiohttp
import requests
from pydantic import Extra, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env


class AzureCognitiveSearchRetriever(BaseRetriever):
    """Retriever for the Azure Cognitive Search service."""

    service_name: str = ""
    """Name of Azure Cognitive Search service"""
    index_name: str = ""
    """Name of Index inside Azure Cognitive Search service"""
    api_key: str = ""
    """API Key. Both Admin and Query keys work, but for reading data it's
    recommended to use a Query key."""
    api_version: str = "2020-06-30"
    """API version"""
    aiosession: Optional[aiohttp.ClientSession] = None
    """ClientSession, in case we want to reuse connection for better performance."""
    content_key: str = "content"
    """Key in a retrieved result to set as the Document page_content."""
    top_k: Optional[int] = None
    """Number of results to retrieve. Set to None to retrieve all results."""

    class Config:
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that service name, index name and api key exists in environment."""
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
3ebaa66ac351-1
values["service_name"] = get_from_dict_or_env( values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME" ) values["index_name"] = get_from_dict_or_env( values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME" ) values["api_key"] = get_from_dict_or_env( values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY" ) return values def _build_search_url(self, query: str) -> str: base_url = f"https://{self.service_name}.search.windows.net/" endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}" top_param = f"&$top={self.top_k}" if self.top_k else "" return base_url + endpoint_path + f"&search={query}" + top_param @property def _headers(self) -> Dict[str, str]: return { "Content-Type": "application/json", "api-key": self.api_key, } def _search(self, query: str) -> List[dict]: search_url = self._build_search_url(query) response = requests.get(search_url, headers=self._headers) if response.status_code != 200: raise Exception(f"Error in search request: {response}") return json.loads(response.text)["value"] async def _asearch(self, query: str) -> List[dict]: search_url = self._build_search_url(query) if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.get(search_url, headers=self._headers) as response:
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
3ebaa66ac351-2
                async with session.get(search_url, headers=self._headers) as response:
                    response_json = await response.json()
        else:
            async with self.aiosession.get(
                search_url, headers=self._headers
            ) as response:
                response_json = await response.json()
        return response_json["value"]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        search_results = self._search(query)
        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        search_results = await self._asearch(query)
        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
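A usage sketch; the service name, index, and key below are placeholders (they may also come from the AZURE_COGNITIVE_SEARCH_* environment variables that the validator checks):

from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever

retriever = AzureCognitiveSearchRetriever(
    service_name="my-search-service",  # placeholder
    index_name="my-index",             # placeholder
    api_key="<query-key>",             # placeholder
    content_key="content",
    top_k=5,
)
docs = retriever.get_relevant_documents("langchain retrievers")
# Each result's `content` field becomes page_content; the rest is metadata.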
98c5d77008b7-0
Source code for langchain.retrievers.zep

from __future__ import annotations

from typing import TYPE_CHECKING, Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document

if TYPE_CHECKING:
    from zep_python import MemorySearchResult


class ZepRetriever(BaseRetriever):
    """Retriever for the Zep long-term memory store.

    Search your user's long-term chat history with Zep.

    Note: You will need to provide the user's `session_id` to use this retriever.

    More on Zep:
    Zep provides long-term conversation storage for LLM apps. The server stores,
    summarizes, embeds, indexes, and enriches conversational AI chat histories,
    and exposes them via simple, low-latency APIs.

    For server installation instructions, see:
    https://docs.getzep.com/deployment/quickstart/
    """

    zep_client: Any
    """Zep client."""
    session_id: str
    """Zep session ID."""
    top_k: Optional[int]
    """Number of documents to return."""

    @root_validator(pre=True)
    def create_client(cls, values: dict) -> dict:
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ValueError(
                "Could not import zep-python package. "
                "Please install it with `pip install zep-python`."
            )
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zep.html
98c5d77008b7-1
values["zep_client"] = values.get( "zep_client", ZepClient(base_url=values["url"], api_key=values.get("api_key")), ) return values def _search_result_to_doc( self, results: List[MemorySearchResult] ) -> List[Document]: return [ Document( page_content=r.message.pop("content"), metadata={"score": r.dist, **r.message}, ) for r in results if r.message ] def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, metadata: Optional[Dict] = None, ) -> List[Document]: from zep_python import MemorySearchPayload payload: MemorySearchPayload = MemorySearchPayload( text=query, metadata=metadata ) results: List[MemorySearchResult] = self.zep_client.search_memory( self.session_id, payload, limit=self.top_k ) return self._search_result_to_doc(results) async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, metadata: Optional[Dict] = None, ) -> List[Document]: from zep_python import MemorySearchPayload payload: MemorySearchPayload = MemorySearchPayload( text=query, metadata=metadata ) results: List[MemorySearchResult] = await self.zep_client.asearch_memory( self.session_id, payload, limit=self.top_k ) return self._search_result_to_doc(results)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zep.html
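A usage sketch; the server URL and session id are placeholders. Note that `url` is consumed by the root validator when it builds the ZepClient:

from langchain.retrievers.zep import ZepRetriever

retriever = ZepRetriever(
    url="http://localhost:8000",  # placeholder Zep server address
    session_id="user-123",        # placeholder: whose chat history to search
    top_k=5,
)
docs = retriever.get_relevant_documents("what did I say about budgets?")
# Each Document carries the matched message plus a "score" in its metadata.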
6f7604943254-0
Source code for langchain.retrievers.arxiv

from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.arxiv import ArxivAPIWrapper


class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
    """Retriever for Arxiv.

    It wraps load() to get_relevant_documents().
    It uses all ArxivAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.load(query=query)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/arxiv.html
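Because the retriever subclasses ArxivAPIWrapper, wrapper arguments pass straight through the constructor. A brief sketch (the same pattern applies to the PubMed and Wikipedia retrievers below):

from langchain.retrievers.arxiv import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)  # any ArxivAPIWrapper argument works
docs = retriever.get_relevant_documents("attention is all you need")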
b68dc0e387fb-0
Source code for langchain.retrievers.pubmed

from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.pubmed import PubMedAPIWrapper


class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
    """Retriever for PubMed API.

    It wraps load() to get_relevant_documents().
    It uses all PubMedAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.load_docs(query=query)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/pubmed.html
ad34ef50c473-0
Source code for langchain.retrievers.ensemble

"""
Ensemble retriever that ensembles the results of
multiple retrievers by using weighted Reciprocal Rank Fusion
"""
from typing import Any, Dict, List

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document


class EnsembleRetriever(BaseRetriever):
    """
    This class ensembles the results of multiple retrievers by using rank fusion.

    Args:
        retrievers: A list of retrievers to ensemble.
        weights: A list of weights corresponding to the retrievers. Defaults to equal
            weighting for all retrievers.
        c: A constant added to the rank, controlling the balance between the importance
            of high-ranked items and the consideration given to lower-ranked items.
            Default is 60.
    """

    retrievers: List[BaseRetriever]
    weights: List[float]
    c: int = 60

    @root_validator(pre=True)
    def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if not values.get("weights"):
            n_retrievers = len(values["retrievers"])
            values["weights"] = [1 / n_retrievers] * n_retrievers
        return values

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """
        Get the relevant documents for a given query.

        Args:
            query: The query to search for.
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/ensemble.html
ad34ef50c473-1
        Returns:
            A list of reranked documents.
        """
        # Get fused result of the retrievers.
        fused_documents = self.rank_fusion(query, run_manager)
        return fused_documents

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """
        Asynchronously get the relevant documents for a given query.

        Args:
            query: The query to search for.

        Returns:
            A list of reranked documents.
        """
        # Get fused result of the retrievers.
        fused_documents = await self.arank_fusion(query, run_manager)
        return fused_documents

    def rank_fusion(
        self, query: str, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """
        Retrieve the results of the retrievers and use rank_fusion_func to get
        the final result.

        Args:
            query: The query to search for.

        Returns:
            A list of reranked documents.
        """
        # Get the results of all retrievers.
        retriever_docs = [
            retriever.get_relevant_documents(
                query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
            )
            for i, retriever in enumerate(self.retrievers)
        ]

        # apply rank fusion
        fused_documents = self.weighted_reciprocal_rank(retriever_docs)

        return fused_documents

    async def arank_fusion(
        self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/ensemble.html
ad34ef50c473-2
) -> List[Document]: """ Asynchronously retrieve the results of the retrievers and use rank_fusion_func to get the final result. Args: query: The query to search for. Returns: A list of reranked documents. """ # Get the results of all retrievers. retriever_docs = [ await retriever.aget_relevant_documents( query, callbacks=run_manager.get_child(tag=f"retriever_{i+1}") ) for i, retriever in enumerate(self.retrievers) ] # apply rank fusion fused_documents = self.weighted_reciprocal_rank(retriever_docs) return fused_documents [docs] def weighted_reciprocal_rank( self, doc_lists: List[List[Document]] ) -> List[Document]: """ Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order. """ if len(doc_lists) != len(self.weights): raise ValueError( "Number of rank lists must be equal to the number of weights." ) # Create a union of all unique documents in the input doc_lists all_documents = set() for doc_list in doc_lists: for doc in doc_list: all_documents.add(doc.page_content) # Initialize the RRF score dictionary for each document
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/ensemble.html
ad34ef50c473-3
        # Initialize the RRF score dictionary for each document
        rrf_score_dic = {doc: 0.0 for doc in all_documents}

        # Calculate RRF scores for each document
        for doc_list, weight in zip(doc_lists, self.weights):
            for rank, doc in enumerate(doc_list, start=1):
                rrf_score = weight * (1 / (rank + self.c))
                rrf_score_dic[doc.page_content] += rrf_score

        # Sort documents by their RRF scores in descending order
        sorted_documents = sorted(
            rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True
        )

        # Map the sorted page_content back to the original document objects
        page_content_to_doc_map = {
            doc.page_content: doc for doc_list in doc_lists for doc in doc_list
        }
        sorted_docs = [
            page_content_to_doc_map[page_content] for page_content in sorted_documents
        ]

        return sorted_docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/ensemble.html
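To make weighted_reciprocal_rank concrete: each document's score is the sum, over the lists it appears in, of weight / (rank + c). A tiny worked example with two invented result lists:

# Two retrievers, equal weights 0.5, c = 60.
# List 1 ranks: A(1), B(2); List 2 ranks: B(1), C(2).
c = 60
score_A = 0.5 * 1 / (1 + c)                      # ~0.00820
score_B = 0.5 * 1 / (2 + c) + 0.5 * 1 / (1 + c)  # ~0.01626
score_C = 0.5 * 1 / (2 + c)                      # ~0.00806
# Final order: B, A, C -- B wins because both retrievers returned it.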
8d5c78dc4968-0
Source code for langchain.retrievers.databerry

from typing import List, Optional

import aiohttp
import requests

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document


class DataberryRetriever(BaseRetriever):
    """Retriever for the Databerry API."""

    datastore_url: str
    top_k: Optional[int]
    api_key: Optional[str]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        response = requests.post(
            self.datastore_url,
            json={
                "query": query,
                **({"topK": self.top_k} if self.top_k is not None else {}),
            },
            headers={
                "Content-Type": "application/json",
                **(
                    {"Authorization": f"Bearer {self.api_key}"}
                    if self.api_key is not None
                    else {}
                ),
            },
        )
        data = response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST",
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html
8d5c78dc4968-1
                self.datastore_url,
                json={
                    "query": query,
                    **({"topK": self.top_k} if self.top_k is not None else {}),
                },
                headers={
                    "Content-Type": "application/json",
                    **(
                        {"Authorization": f"Bearer {self.api_key}"}
                        if self.api_key is not None
                        else {}
                    ),
                },
            ) as response:
                data = await response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html
7e05c55af33f-0
Source code for langchain.retrievers.elastic_search_bm25

"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations

import uuid
from typing import Any, Iterable, List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class ElasticSearchBM25Retriever(BaseRetriever):
    """Retriever for Elasticsearch using BM25 as a retrieval method.

    To connect to an Elasticsearch instance that requires login credentials,
    including Elastic Cloud, use the Elasticsearch URL format
    https://username:password@es_host:9243. For example, to connect to Elastic
    Cloud, create the Elasticsearch URL with the required authentication details
    and pass it to the `create` method as the named parameter elasticsearch_url.

    You can obtain your Elastic Cloud URL and login credentials by logging in to
    the Elastic Cloud console at https://cloud.elastic.co, selecting your
    deployment, and navigating to the "Deployments" page.

    To obtain your Elastic Cloud password for the default "elastic" user:

    1. Log in to the Elastic Cloud console at https://cloud.elastic.co
    2. Go to "Security" > "Users"
    3. Locate the "elastic" user and click "Edit"
    4. Click "Reset password"
    5. Follow the prompts to reset the password

    The format for Elastic Cloud URLs is
    https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
    """

    client: Any
    """Elasticsearch client."""
    index_name: str
    """Name of the index to use in Elasticsearch."""
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html
7e05c55af33f-1
    @classmethod
    def create(
        cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
    ) -> ElasticSearchBM25Retriever:
        """
        Create an ElasticSearchBM25Retriever from a list of texts.

        Args:
            elasticsearch_url: URL of the Elasticsearch instance to connect to.
            index_name: Name of the index to use in Elasticsearch.
            k1: BM25 parameter k1.
            b: BM25 parameter b.

        Returns:
            An ElasticSearchBM25Retriever connected to the newly created index.
        """
        from elasticsearch import Elasticsearch

        # Create an Elasticsearch client instance
        es = Elasticsearch(elasticsearch_url)

        # Define the index settings and mappings
        settings = {
            "analysis": {"analyzer": {"default": {"type": "standard"}}},
            "similarity": {
                "custom_bm25": {
                    "type": "BM25",
                    "k1": k1,
                    "b": b,
                }
            },
        }
        mappings = {
            "properties": {
                "content": {
                    "type": "text",
                    "similarity": "custom_bm25",  # Use the custom BM25 similarity
                }
            }
        }

        # Create the index with the specified settings and mappings
        es.indices.create(index=index_name, mappings=mappings, settings=settings)
        return cls(client=es, index_name=index_name)

    def add_texts(
        self,
        texts: Iterable[str],
        refresh_indices: bool = True,
    ) -> List[str]:
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html
7e05c55af33f-2
"""Run more texts through the embeddings and add to the retriever. Args: texts: Iterable of strings to add to the retriever. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the retriever. """ try: from elasticsearch.helpers import bulk except ImportError: raise ValueError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) requests = [] ids = [] for i, text in enumerate(texts): _id = str(uuid.uuid4()) request = { "_op_type": "index", "_index": self.index_name, "content": text, "_id": _id, } ids.append(_id) requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: query_dict = {"query": {"match": {"content": query}}} res = self.client.search(index=self.index_name, body=query_dict) docs = [] for r in res["hits"]["hits"]: docs.append(Document(page_content=r["_source"]["content"])) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html
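A usage sketch, assuming an Elasticsearch instance is reachable at the placeholder URL:

from langchain.retrievers.elastic_search_bm25 import ElasticSearchBM25Retriever

retriever = ElasticSearchBM25Retriever.create(
    elasticsearch_url="http://localhost:9200",  # placeholder
    index_name="langchain-bm25",
)
retriever.add_texts(["foo", "foo bar", "hello world"])
docs = retriever.get_relevant_documents("foo")  # BM25 match on "content"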
d346995cab4d-0
Source code for langchain.retrievers.zilliz

import warnings
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.zilliz import Zilliz

# TODO: Update to ZillizClient + Hybrid Search when available


class ZillizRetriever(BaseRetriever):
    """Retriever for the Zilliz API."""

    embedding_function: Embeddings
    """The underlying embedding function from which documents will be retrieved."""
    collection_name: str = "LangChainCollection"
    """The name of the collection in Zilliz."""
    connection_args: Optional[Dict[str, Any]] = None
    """The connection arguments for the Zilliz client."""
    consistency_level: str = "Session"
    """The consistency level for the Zilliz client."""
    search_params: Optional[dict] = None
    """The search parameters for the Zilliz client."""
    store: Zilliz
    """The underlying Zilliz store."""
    retriever: BaseRetriever
    """The underlying retriever."""

    @root_validator(pre=True)
    def create_client(cls, values: dict) -> dict:
        values["store"] = Zilliz(
            values["embedding_function"],
            values["collection_name"],
            values["connection_args"],
            values["consistency_level"],
        )
        values["retriever"] = values["store"].as_retriever(
            search_kwargs={"param": values["search_params"]}
        )
        return values
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zilliz.html
d346995cab4d-1
    def add_texts(
        self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> None:
        """Add text to the Zilliz store.

        Args:
            texts (List[str]): The text
            metadatas (List[dict]): Metadata dicts, must line up with existing store
        """
        self.store.add_texts(texts, metadatas)

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        return self.retriever.get_relevant_documents(
            query, run_manager=run_manager.get_child(), **kwargs
        )


def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
    """Deprecated ZillizRetreiver.

    Please use ZillizRetriever ('i' before 'e') instead.

    Args:
        *args:
        **kwargs:

    Returns:
        ZillizRetriever
    """
    warnings.warn(
        "ZillizRetreiver will be deprecated in the future. "
        "Please use ZillizRetriever ('i' before 'e') instead.",
        DeprecationWarning,
    )
    return ZillizRetriever(*args, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/zilliz.html
ddbed6732285-0
Source code for langchain.retrievers.knn

"""KNN Retriever.
Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations

import concurrent.futures
from typing import Any, List, Optional

import numpy as np

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    """
    Create an index of embeddings for a list of contexts.

    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.

    Returns:
        Index of embeddings.
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class KNNRetriever(BaseRetriever):
    """KNN Retriever."""

    embeddings: Embeddings
    """Embeddings model to use."""
    index: Any
    """Index of embeddings."""
    texts: List[str]
    """List of texts to index."""
    k: int = 4
    """Number of results to return."""
    relevancy_threshold: Optional[float] = None
    """Threshold for relevancy."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) -> KNNRetriever:
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/knn.html
ddbed6732285-1
        index = create_index(texts, embeddings)
        return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        query_embeds = np.array(self.embeddings.embed_query(query))
        # L2-normalize the index and query embeddings so the dot product
        # below is the cosine similarity
        index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
        query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())

        similarities = index_embeds.dot(query_embeds)
        sorted_ix = np.argsort(-similarities)

        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator

        top_k_results = [
            Document(page_content=self.texts[row])
            for row in sorted_ix[0 : self.k]
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            )
        ]
        return top_k_results
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/knn.html
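A usage sketch; the texts are toy data, and OpenAIEmbeddings assumes a configured API key (any Embeddings implementation works):

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.knn import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world"], OpenAIEmbeddings(), k=2
)
docs = retriever.get_relevant_documents("foo")  # top-2 by cosine similarity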
e031e0c213f0-0
Source code for langchain.retrievers.multi_query

import logging
from typing import List

from pydantic import BaseModel, Field

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseRetriever, Document

logger = logging.getLogger(__name__)


class LineList(BaseModel):
    """List of lines."""

    lines: List[str] = Field(description="Lines of text")
    """List of lines."""


class LineListOutputParser(PydanticOutputParser):
    """Output parser for a list of lines."""

    def __init__(self) -> None:
        super().__init__(pydantic_object=LineList)

    def parse(self, text: str) -> LineList:
        lines = text.strip().split("\n")
        return LineList(lines=lines)


# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an AI language model assistant. Your task is
    to generate 3 different versions of the given user
    question to retrieve relevant documents from a vector database.
    By generating multiple perspectives on the user question,
    your goal is to help the user overcome some of the limitations
    of distance-based similarity search. Provide these alternative
    questions separated by newlines. Original question: {question}""",
)


class MultiQueryRetriever(BaseRetriever):
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html
e031e0c213f0-1
"""Given a user query, use an LLM to write a set of queries. Retrieve docs for each query. Rake the unique union of all retrieved docs.""" retriever: BaseRetriever llm_chain: LLMChain verbose: bool = True parser_key: str = "lines" [docs] @classmethod def from_llm( cls, retriever: BaseRetriever, llm: BaseLLM, prompt: PromptTemplate = DEFAULT_QUERY_PROMPT, parser_key: str = "lines", ) -> "MultiQueryRetriever": """Initialize from llm using default template. Args: retriever: retriever to query documents from llm: llm for query generation using DEFAULT_QUERY_PROMPT Returns: MultiQueryRetriever """ output_parser = LineListOutputParser() llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser) return cls( retriever=retriever, llm_chain=llm_chain, parser_key=parser_key, ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> List[Document]: """Get relevated documents given a user query. Args: question: user query Returns: Unique union of relevant documents from all generated queries """ queries = self.generate_queries(query, run_manager) documents = self.retrieve_documents(queries, run_manager) unique_documents = self.unique_union(documents) return unique_documents [docs] def generate_queries(
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html
e031e0c213f0-2
    def generate_queries(
        self, question: str, run_manager: CallbackManagerForRetrieverRun
    ) -> List[str]:
        """Generate queries based upon user input.

        Args:
            question: user query

        Returns:
            List of LLM generated queries that are similar to the user input
        """
        response = self.llm_chain(
            {"question": question}, callbacks=run_manager.get_child()
        )
        lines = getattr(response["text"], self.parser_key, [])
        if self.verbose:
            logger.info(f"Generated queries: {lines}")
        return lines

    def retrieve_documents(
        self, queries: List[str], run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run all LLM generated queries.

        Args:
            queries: query list

        Returns:
            List of retrieved Documents
        """
        documents = []
        for query in queries:
            docs = self.retriever.get_relevant_documents(
                query, callbacks=run_manager.get_child()
            )
            documents.extend(docs)
        return documents

    def unique_union(self, documents: List[Document]) -> List[Document]:
        """Get unique Documents.

        Args:
            documents: List of retrieved Documents

        Returns:
            List of unique retrieved Documents
        """
        # Create a dictionary with page_content as keys to remove duplicates
        # TODO: Add Document ID property (e.g., UUID)
        unique_documents_dict = {
            (doc.page_content, tuple(sorted(doc.metadata.items()))): doc
            for doc in documents
        }
        unique_documents = list(unique_documents_dict.values())
        return unique_documents
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html
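A usage sketch; `base_retriever` stands in for any existing retriever (for example, a vectorstore's), and OpenAI assumes a configured key:

from langchain.llms import OpenAI
from langchain.retrievers.multi_query import MultiQueryRetriever

retriever = MultiQueryRetriever.from_llm(retriever=base_retriever, llm=OpenAI())
docs = retriever.get_relevant_documents(
    "What are the approaches to task decomposition?"
)
# The LLM rewrites the question several ways; results are deduplicated
# by unique_union before being returned.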
3f7404a0233a-0
Source code for langchain.retrievers.parent_document_retriever

import uuid
from typing import Any, Dict, List, Optional

from langchain.callbacks.base import Callbacks
from langchain.schema.document import Document
from langchain.schema.retriever import BaseRetriever
from langchain.schema.storage import BaseStore
from langchain.text_splitter import TextSplitter
from langchain.vectorstores.base import VectorStore


class ParentDocumentRetriever(BaseRetriever):
    """Fetches small chunks, then fetches their parent documents.

    When splitting documents for retrieval, there are often conflicting desires:

    1. You may want to have small documents, so that their embeddings can most
       accurately reflect their meaning. If too long, then the embeddings can
       lose meaning.
    2. You want to have long enough documents that the context of each chunk is
       retained.

    The ParentDocumentRetriever strikes that balance by splitting and storing
    small chunks of data. During retrieval, it first fetches the small chunks
    but then looks up the parent ids for those chunks and returns those larger
    documents.

    Note that "parent document" refers to the document that a small chunk
    originated from. This can either be the whole raw document OR a larger chunk.

    Examples:

        .. code-block:: python

            # Imports
            from langchain.vectorstores import Chroma
            from langchain.embeddings import OpenAIEmbeddings
            from langchain.text_splitter import RecursiveCharacterTextSplitter
            from langchain.storage import InMemoryStore

            # This text splitter is used to create the parent documents
            parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)

            # This text splitter is used to create the child documents
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/parent_document_retriever.html
3f7404a0233a-1
            # It should create documents smaller than the parent
            child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

            # The vectorstore to use to index the child chunks
            vectorstore = Chroma(embedding_function=OpenAIEmbeddings())

            # The storage layer for the parent documents
            store = InMemoryStore()

            # Initialize the retriever
            retriever = ParentDocumentRetriever(
                vectorstore=vectorstore,
                docstore=store,
                child_splitter=child_splitter,
                parent_splitter=parent_splitter,
            )
    """

    vectorstore: VectorStore
    """The underlying vectorstore to use to store small chunks
    and their embedding vectors"""
    docstore: BaseStore[str, Document]
    """The storage layer for the parent documents"""
    child_splitter: TextSplitter
    """The text splitter to use to create child documents."""
    id_key: str = "doc_id"
    """The key to use to track the parent id. This will be stored in the
    metadata of child documents."""
    parent_splitter: Optional[TextSplitter] = None
    """The text splitter to use to create parent documents.
    If none, then the parent documents will be the raw documents passed in."""

    def get_relevant_documents(
        self,
        query: str,
        *,
        callbacks: Callbacks = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        sub_docs = self.vectorstore.similarity_search(query)
        # We do this to maintain the order of the ids that are returned
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/parent_document_retriever.html
3f7404a0233a-2
        ids = []
        for d in sub_docs:
            if d.metadata[self.id_key] not in ids:
                ids.append(d.metadata[self.id_key])
        docs = self.docstore.mget(ids)
        return [d for d in docs if d is not None]

    def add_documents(
        self,
        documents: List[Document],
        ids: Optional[List[str]],
        add_to_docstore: bool = True,
    ) -> None:
        """Adds documents to the docstore and vectorstores.

        Args:
            documents: List of documents to add
            ids: Optional list of ids for documents. If provided should be the same
                length as the list of documents. Can be provided if parent documents
                are already in the document store and you don't want to re-add
                to the docstore. If not provided, random UUIDs will be used as ids.
            add_to_docstore: Boolean of whether to add documents to docstore.
                This can be false if and only if `ids` are provided. You may want
                to set this to False if the documents are already in the docstore
                and you don't want to re-add them.
        """
        if self.parent_splitter is not None:
            documents = self.parent_splitter.split_documents(documents)
        if ids is None:
            doc_ids = [str(uuid.uuid4()) for _ in documents]
            if not add_to_docstore:
                raise ValueError(
                    "If ids are not passed in, `add_to_docstore` MUST be True"
                )
        else:
            if len(documents) != len(ids):
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/parent_document_retriever.html
3f7404a0233a-3
                raise ValueError(
                    "Got uneven list of documents and ids. "
                    "If `ids` is provided, should be same length as `documents`."
                )
            doc_ids = ids

        docs = []
        full_docs = []
        for i, doc in enumerate(documents):
            _id = doc_ids[i]
            sub_docs = self.child_splitter.split_documents([doc])
            for _doc in sub_docs:
                _doc.metadata[self.id_key] = _id
            docs.extend(sub_docs)
            full_docs.append((_id, doc))
        self.vectorstore.add_documents(docs)
        if add_to_docstore:
            self.docstore.mset(full_docs)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/parent_document_retriever.html
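Continuing the class docstring example above, a hypothetical indexing-and-query round trip; `retriever` comes from that example and `documents` stands in for any list of Document objects:

# Split into parents, index the child chunks, and store the parents.
retriever.add_documents(documents, ids=None)

# Child chunks are matched by similarity search, then mapped back to
# their parents via the "doc_id" metadata key and deduplicated.
results = retriever.get_relevant_documents("some query")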
86efacb4c53b-0
Source code for langchain.retrievers.chatgpt_plugin_retriever

from __future__ import annotations

from typing import List, Optional

import aiohttp
import requests

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document


class ChatGPTPluginRetriever(BaseRetriever):
    """Retrieves documents from a ChatGPT plugin."""

    url: str
    """URL of the ChatGPT plugin."""
    bearer_token: str
    """Bearer token for the ChatGPT plugin."""
    top_k: int = 3
    """Number of documents to return."""
    filter: Optional[dict] = None
    """Filter to apply to the results."""
    aiosession: Optional[aiohttp.ClientSession] = None
    """Aiohttp session to use for requests."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True
        """Allow arbitrary types."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        url, json, headers = self._create_request(query)
        response = requests.post(url, json=json, headers=headers)
        results = response.json()["results"][0]["results"]
        docs = []
        for d in results:
            content = d.pop("text")
            metadata = d.pop("metadata", d)
            if metadata.get("source_id"):
                metadata["source"] = metadata.pop("source_id")
            docs.append(Document(page_content=content, metadata=metadata))
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
86efacb4c53b-1
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        url, json, headers = self._create_request(query)

        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=json) as response:
                    res = await response.json()
        else:
            async with self.aiosession.post(
                url, headers=headers, json=json
            ) as response:
                res = await response.json()

        results = res["results"][0]["results"]
        docs = []
        for d in results:
            content = d.pop("text")
            metadata = d.pop("metadata", d)
            if metadata.get("source_id"):
                metadata["source"] = metadata.pop("source_id")
            docs.append(Document(page_content=content, metadata=metadata))
        return docs

    def _create_request(self, query: str) -> tuple[str, dict, dict]:
        url = f"{self.url}/query"
        json = {
            "queries": [
                {
                    "query": query,
                    "filter": self.filter,
                    "top_k": self.top_k,
                }
            ]
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.bearer_token}",
        }
        return url, json, headers
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
8349a5bc03f7-0
Source code for langchain.retrievers.contextual_compression

from typing import Any, List

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.retrievers.document_compressors.base import (
    BaseDocumentCompressor,
)
from langchain.schema import BaseRetriever, Document


class ContextualCompressionRetriever(BaseRetriever):
    """Retriever that wraps a base retriever and compresses the results."""

    base_compressor: BaseDocumentCompressor
    """Compressor for compressing retrieved documents."""
    base_retriever: BaseRetriever
    """Base Retriever to use for getting relevant documents."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Get documents relevant for a query.

        Args:
            query: string to find relevant documents for

        Returns:
            Sequence of relevant documents
        """
        docs = self.base_retriever.get_relevant_documents(
            query, callbacks=run_manager.get_child(), **kwargs
        )
        if docs:
            compressed_docs = self.base_compressor.compress_documents(
                docs, query, callbacks=run_manager.get_child()
            )
            return list(compressed_docs)
        else:
            return []

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
8349a5bc03f7-1
        run_manager: AsyncCallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Get documents relevant for a query.

        Args:
            query: string to find relevant documents for

        Returns:
            List of relevant documents
        """
        docs = await self.base_retriever.aget_relevant_documents(
            query, callbacks=run_manager.get_child(), **kwargs
        )
        if docs:
            compressed_docs = await self.base_compressor.acompress_documents(
                docs, query, callbacks=run_manager.get_child()
            )
            return list(compressed_docs)
        else:
            return []
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
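A usage sketch pairing the retriever with LLMChainExtractor; `base_retriever` stands in for any existing retriever, and OpenAI assumes a configured key:

from langchain.llms import OpenAI
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)
# Documents come back trimmed to the passages relevant to the query.
docs = compression_retriever.get_relevant_documents("some query")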
4912be59d0b0-0
Source code for langchain.retrievers.wikipedia

from typing import List

from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
    """Retriever for Wikipedia API.

    It wraps load() to get_relevant_documents().
    It uses all WikipediaAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.load(query=query)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/wikipedia.html
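Because WikipediaRetriever also inherits WikipediaAPIWrapper, wrapper fields such as lang and top_k_results can be set directly on the retriever. A short sketch (not part of the module source):

from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(lang="en", top_k_results=3)
docs = retriever.get_relevant_documents("Alan Turing")
print(docs[0].metadata)  # Wikipedia page metadata, e.g. the article title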
Source code for langchain.retrievers.google_cloud_enterprise_search """Retriever wrapper for Google Cloud Enterprise Search on Gen App Builder.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from google.cloud.discoveryengine_v1beta import ( SearchRequest, SearchResult, SearchServiceClient, ) [docs]class GoogleCloudEnterpriseSearchRetriever(BaseRetriever): """Retriever for the Google Cloud Enterprise Search Service API. For the detailed explanation of the Enterprise Search concepts and configuration parameters refer to the product documentation. https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction """ project_id: str """Google Cloud Project ID.""" search_engine_id: str """Enterprise Search engine ID.""" serving_config_id: str = "default_config" """Enterprise Search serving config ID.""" location_id: str = "global" """Enterprise Search engine location.""" filter: Optional[str] = None """Filter expression.""" get_extractive_answers: bool = False """If True return Extractive Answers, otherwise return Extractive Segments.""" max_documents: int = Field(default=5, ge=1, le=100) """The maximum number of documents to return.""" max_extractive_answer_count: int = Field(default=1, ge=1, le=5) """The maximum number of extractive answers returned in each search result.
At most 5 answers will be returned for each SearchResult.
"""
max_extractive_segment_count: int = Field(default=1, ge=1, le=1)
"""The maximum number of extractive segments returned in each search result.
Currently one segment will be returned for each SearchResult.
"""
query_expansion_condition: int = Field(default=1, ge=0, le=2)
"""Specification to determine under which conditions query expansion should occur.
0 - Unspecified query expansion condition. In this case, server behavior defaults
to disabled
1 - Disabled query expansion. Only the exact search query is used, even if
SearchResponse.total_size is zero.
2 - Automatic query expansion built by the Search API.
"""
spell_correction_mode: int = Field(default=2, ge=0, le=2)
"""Specification to determine under which conditions spell correction should occur.
0 - Unspecified spell correction mode. In this case, server behavior defaults
to auto.
1 - Suggestion only. Search API will try to find a spell suggestion if there is any
and put in the `SearchResponse.corrected_query`.
The spell suggestion will not be used as the search query.
2 - Automatic spell correction built by the Search API.
Search will be based on the corrected query if found.
"""
credentials: Any = None
"""The default custom credentials (google.auth.credentials.Credentials) to use
when making API calls. If not provided, credentials will be ascertained from
the environment."""
_client: SearchServiceClient
_serving_config: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
underscore_attrs_are_private = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validates the environment."""
try:
from google.cloud import discoveryengine_v1beta # noqa: F401
except ImportError as exc:
raise ImportError(
"google.cloud.discoveryengine is not installed. "
"Please install it with pip install google-cloud-discoveryengine"
) from exc
values["project_id"] = get_from_dict_or_env(values, "project_id", "PROJECT_ID")
values["search_engine_id"] = get_from_dict_or_env(
values, "search_engine_id", "SEARCH_ENGINE_ID"
)
return values
def __init__(self, **data: Any) -> None:
"""Initializes private fields."""
from google.cloud.discoveryengine_v1beta import SearchServiceClient
super().__init__(**data)
self._client = SearchServiceClient(credentials=self.credentials)
self._serving_config = self._client.serving_config_path(
project=self.project_id,
location=self.location_id,
data_store=self.search_engine_id,
serving_config=self.serving_config_id,
)
def _convert_search_response(
self, results: Sequence[SearchResult]
) -> List[Document]:
"""Converts a sequence of search results to a list of LangChain documents."""
from google.protobuf.json_format import MessageToDict
documents: List[Document] = []
for result in results:
document_dict = MessageToDict(
result.document._pb, preserving_proto_field_name=True
)
derived_struct_data = document_dict.get("derived_struct_data", None)
if not derived_struct_data:
continue
doc_metadata = document_dict.get("struct_data", {})
doc_metadata["id"] = document_dict["id"]
chunk_type = (
"extractive_answers"
if self.get_extractive_answers
else "extractive_segments"
)
for chunk in getattr(derived_struct_data, chunk_type, []):
doc_metadata["source"] = derived_struct_data.get("link", "")
if chunk_type == "extractive_answers":
doc_metadata["source"] += f":{chunk.get('pageNumber', '')}"
documents.append(
Document(
page_content=chunk.get("content", ""), metadata=doc_metadata
)
)
return documents
def _create_search_request(self, query: str) -> SearchRequest:
"""Prepares a SearchRequest object."""
from google.cloud.discoveryengine_v1beta import SearchRequest
query_expansion_spec = SearchRequest.QueryExpansionSpec(
condition=self.query_expansion_condition,
)
spell_correction_spec = SearchRequest.SpellCorrectionSpec(
mode=self.spell_correction_mode
)
if self.get_extractive_answers:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_answer_count=self.max_extractive_answer_count,
)
)
else:
extractive_content_spec = (
SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
max_extractive_segment_count=self.max_extractive_segment_count,
)
)
content_search_spec = SearchRequest.ContentSearchSpec(
extractive_content_spec=extractive_content_spec,
)
return SearchRequest(
query=query,
filter=self.filter,
serving_config=self._serving_config,
page_size=self.max_documents,
content_search_spec=content_search_spec,
query_expansion_spec=query_expansion_spec,
spell_correction_spec=spell_correction_spec,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
search_request = self._create_search_request(query)
response = self._client.search(search_request)
documents = self._convert_search_response(response.results)
return documents
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/google_cloud_enterprise_search.html
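A construction sketch (not part of the module source); the project and engine IDs are placeholders, and google-cloud-discoveryengine must be installed with credentials available in the environment.

from langchain.retrievers import GoogleCloudEnterpriseSearchRetriever

retriever = GoogleCloudEnterpriseSearchRetriever(
    project_id="my-gcp-project",      # placeholder
    search_engine_id="my-engine-id",  # placeholder
    max_documents=3,
    get_extractive_answers=True,  # return extractive answers instead of segments
)
docs = retriever.get_relevant_documents("What is our refund policy?")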
Source code for langchain.retrievers.vespa_retriever from __future__ import annotations import json from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document if TYPE_CHECKING: from vespa.application import Vespa [docs]class VespaRetriever(BaseRetriever): """Retriever that uses Vespa.""" app: Vespa """Vespa application to query.""" body: Dict """Body of the query.""" content_field: str """Name of the content field.""" metadata_fields: Sequence[str] """Names of the metadata fields.""" def _query(self, body: Dict) -> List[Document]: response = self.app.query(body) if not str(response.status_code).startswith("2"): raise RuntimeError( "Could not retrieve data from Vespa. Error code: {}".format( response.status_code ) ) root = response.json["root"] if "errors" in root: raise RuntimeError(json.dumps(root["errors"])) docs = [] for child in response.hits: page_content = child["fields"].pop(self.content_field, "") if self.metadata_fields == "*": metadata = child["fields"] else: metadata = {mf: child["fields"].get(mf) for mf in self.metadata_fields} metadata["id"] = child["id"] docs.append(Document(page_content=page_content, metadata=metadata)) return docs def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
body = self.body.copy()
body["query"] = query
return self._query(body)
[docs] def get_relevant_documents_with_filter(
self, query: str, *, _filter: Optional[str] = None
) -> List[Document]:
body = self.body.copy()
_filter = f" and {_filter}" if _filter else ""
body["yql"] = body["yql"] + _filter
body["query"] = query
return self._query(body)
[docs] @classmethod
def from_params(
cls,
url: str,
content_field: str,
*,
k: Optional[int] = None,
metadata_fields: Union[Sequence[str], Literal["*"]] = (),
sources: Union[Sequence[str], Literal["*"], None] = None,
_filter: Optional[str] = None,
yql: Optional[str] = None,
**kwargs: Any,
) -> VespaRetriever:
"""Instantiate retriever from params.
Args:
url (str): Vespa app URL.
content_field (str): Field in results to return as Document page_content.
k (Optional[int]): Number of Documents to return. Defaults to None.
metadata_fields(Sequence[str] or "*"): Fields in results to include in
document metadata. Defaults to empty tuple ().
sources (Sequence[str] or "*" or None): Sources to retrieve
from. Defaults to None.
_filter (Optional[str]): Document filter condition expressed in YQL.
Defaults to None.
yql (Optional[str]): Full YQL query to be used. Should not be specified
if _filter or sources are specified. Defaults to None.
kwargs (Any): Keyword arguments added to query body.
Returns:
VespaRetriever: Instantiated VespaRetriever.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
"pyvespa is not installed, please install with `pip install pyvespa`"
)
app = Vespa(url)
body = kwargs.copy()
if yql and (sources or _filter):
raise ValueError(
"yql should only be specified if both sources and _filter are not "
"specified."
)
else:
if metadata_fields == "*":
_fields = "*"
body["summary"] = "short"
else:
_fields = ", ".join([content_field] + list(metadata_fields or []))
_sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
_filter = f" and {_filter}" if _filter else ""
yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
body["yql"] = yql
if k:
body["hits"] = k
return cls(
app=app,
body=body,
content_field=content_field,
metadata_fields=metadata_fields,
)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
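A usage sketch (not part of the module source) built on from_params() above; the endpoint, field names, and source name are placeholders for a running Vespa application.

from langchain.retrievers import VespaRetriever

retriever = VespaRetriever.from_params(
    "http://localhost:8080",   # placeholder Vespa endpoint
    "content",                 # field returned as page_content
    k=5,
    metadata_fields=("title",),
    sources="my-source",       # placeholder source name
)
docs = retriever.get_relevant_documents("what keeps a plane in the air?")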
Source code for langchain.retrievers.tfidf from __future__ import annotations import pickle from pathlib import Path from typing import Any, Dict, Iterable, List, Optional from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document [docs]class TFIDFRetriever(BaseRetriever): """TF-IDF Retriever. Largely based on https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb """ vectorizer: Any """TF-IDF vectorizer.""" docs: List[Document] """Documents.""" tfidf_array: Any """TF-IDF array.""" k: int = 4 """Number of documents to return.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] @classmethod def from_texts( cls, texts: Iterable[str], metadatas: Optional[Iterable[dict]] = None, tfidf_params: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> TFIDFRetriever: try: from sklearn.feature_extraction.text import TfidfVectorizer except ImportError: raise ImportError( "Could not import scikit-learn, please install with `pip install " "scikit-learn`." ) tfidf_params = tfidf_params or {} vectorizer = TfidfVectorizer(**tfidf_params) tfidf_array = vectorizer.fit_transform(texts)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
from sklearn.metrics.pairwise import cosine_similarity
query_vec = self.vectorizer.transform(
[query]
) # shape: (1, n_features)
results = cosine_similarity(self.tfidf_array, query_vec).reshape(
(-1,)
) # cosine similarity with each document, shape: (n_docs,)
return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
return return_docs
[docs] def save_local(
self,
folder_path: str,
file_name: str = "tfidf_vectorizer",
) -> None:
try:
import joblib
except ImportError:
raise ImportError(
"Could not import joblib, please install with `pip install joblib`."
)
path = Path(folder_path)
path.mkdir(exist_ok=True, parents=True)
# Save vectorizer with joblib dump.
joblib.dump(self.vectorizer, path / f"{file_name}.joblib")
# Save docs and tfidf array as pickle.
with open(path / f"{file_name}.pkl", "wb") as f:
pickle.dump((self.docs, self.tfidf_array), f)
[docs] @classmethod
def load_local(
cls,
folder_path: str,
file_name: str = "tfidf_vectorizer",
) -> TFIDFRetriever:
try:
import joblib
except ImportError:
raise ImportError(
"Could not import joblib, please install with `pip install joblib`."
)
path = Path(folder_path)
# Load vectorizer with joblib load.
vectorizer = joblib.load(path / f"{file_name}.joblib")
# Load docs and tfidf array as pickle.
with open(path / f"{file_name}.pkl", "rb") as f:
docs, tfidf_array = pickle.load(f)
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/tfidf.html
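A self-contained sketch (not part of the module source) of the in-memory TF-IDF retriever, including round-tripping the index through save_local()/load_local(). Note that load_local() unpickles files, so only load indexes you trust.

from langchain.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(
    ["apples and oranges", "cars and airplanes", "apples and airplanes"], k=2
)
docs = retriever.get_relevant_documents("apples")

retriever.save_local("tfidf_index")  # writes a .joblib and a .pkl file
restored = TFIDFRetriever.load_local("tfidf_index")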
Source code for langchain.retrievers.llama_index
from typing import Any, Dict, List, cast
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
[docs]class LlamaIndexRetriever(BaseRetriever):
"""Retriever for question-answering with sources over a LlamaIndex
data structure."""
index: Any
"""LlamaIndex index to query."""
query_kwargs: Dict = Field(default_factory=dict)
"""Keyword arguments to pass to the query method."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.indices.base import BaseGPTIndex
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to run `pip install llama-index` to use this retriever."
)
index = cast(BaseGPTIndex, self.index)
response = index.query(query, response_mode="no_text", **self.query_kwargs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
[docs]class LlamaIndexGraphRetriever(BaseRetriever):
"""Retriever for question-answering with sources over a LlamaIndex
graph data structure."""
graph: Any
"""LlamaIndex graph to query."""
query_configs: List[Dict] = Field(default_factory=list)
"""List of query configs to pass to the query method."""
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.composability.graph import (
QUERY_CONFIG_TYPE,
ComposableGraph,
)
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to run `pip install llama-index` to use this retriever."
)
graph = cast(ComposableGraph, self.graph)
# for now, inject response_mode="no_text" into query configs
for query_config in self.query_configs:
query_config["response_mode"] = "no_text"
query_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs)
response = graph.query(query, query_configs=query_configs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/llama_index.html
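A usage sketch (not part of the module source), assuming an index built with the older llama-index API this wrapper targets (one whose .query() accepts response_mode); building the index itself is out of scope here, and the query kwarg shown is an assumption that is simply forwarded verbatim.

from langchain.retrievers import LlamaIndexRetriever

# `my_index` is assumed to be a previously built llama-index GPT index.
retriever = LlamaIndexRetriever(
    index=my_index,
    query_kwargs={"similarity_top_k": 3},  # assumed kwarg; passed through to .query()
)
docs = retriever.get_relevant_documents("What did the author do growing up?")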
Source code for langchain.retrievers.kendra
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Union
from pydantic import BaseModel, Extra, root_validator, validator
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]def clean_excerpt(excerpt: str) -> str:
"""Cleans an excerpt from Kendra.
Args:
excerpt: The excerpt to clean.
Returns:
The cleaned excerpt.
"""
if not excerpt:
return excerpt
res = re.sub(r"\s+", " ", excerpt).replace("...", "")
return res
[docs]def combined_text(item: "ResultItem") -> str:
"""Combines a ResultItem title and excerpt into a single string.
Args:
item: the ResultItem of a Kendra search.
Returns:
A combined text of the title and excerpt of the given item.
"""
text = ""
title = item.get_title()
if title:
text += f"Document Title: {title}\n"
excerpt = clean_excerpt(item.get_excerpt())
if excerpt:
text += f"Document Excerpt: \n{excerpt}\n"
return text
DocumentAttributeValueType = Union[str, int, List[str], None]
"""Possible types of a DocumentAttributeValue. Dates are also represented as str."""
[docs]class Highlight(BaseModel, extra=Extra.allow):
"""
Represents the information that can be used to highlight key words in the excerpt.
"""
BeginOffset: int
"""The zero-based location in the excerpt where the highlight starts."""
EndOffset: int
"""The zero-based location in the excerpt where the highlight ends."""
TopAnswer: Optional[bool]
"""Indicates whether the result is the best one."""
Type: Optional[str]
"""The highlight type: STANDARD or THESAURUS_SYNONYM."""
[docs]class TextWithHighLights(BaseModel, extra=Extra.allow):
"""Text with highlights."""
Text: str
"""The text."""
Highlights: Optional[Any]
"""The highlights."""
[docs]class AdditionalResultAttributeValue(BaseModel, extra=Extra.allow):
"""The value of an additional result attribute."""
TextWithHighlightsValue: TextWithHighLights
"""The text with highlights value."""
[docs]class AdditionalResultAttribute(BaseModel, extra=Extra.allow):
"""An additional result attribute."""
Key: str
"""The key of the attribute."""
ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
"""The type of the value."""
Value: AdditionalResultAttributeValue
"""The value of the attribute."""
[docs] def get_value_text(self) -> str:
return self.Value.TextWithHighlightsValue.Text
[docs]class DocumentAttributeValue(BaseModel, extra=Extra.allow):
"""The value of a document attribute."""
DateValue: Optional[str]
"""The date expressed as an ISO 8601 string."""
LongValue: Optional[int]
"""The long value."""
StringListValue: Optional[List[str]]
"""The string list value."""
StringValue: Optional[str]
"""The string value."""
@property
def value(self) -> DocumentAttributeValueType:
"""The only defined document attribute value or None.
According to Amazon Kendra, you can only provide one
value for a document attribute.
"""
if self.DateValue:
return self.DateValue
if self.LongValue:
return self.LongValue
if self.StringListValue:
return self.StringListValue
if self.StringValue:
return self.StringValue
return None
[docs]class DocumentAttribute(BaseModel, extra=Extra.allow):
"""A document attribute."""
Key: str
"""The key of the attribute."""
Value: DocumentAttributeValue
"""The value of the attribute."""
[docs]class ResultItem(BaseModel, ABC, extra=Extra.allow):
"""Abstract class that represents a result item."""
Id: Optional[str]
"""The ID of the relevant result item."""
DocumentId: Optional[str]
"""The document ID."""
DocumentURI: Optional[str]
"""The document URI."""
DocumentAttributes: Optional[List[DocumentAttribute]] = []
"""The document attributes."""
[docs] @abstractmethod
def get_title(self) -> str:
"""Document title."""
[docs] @abstractmethod
def get_excerpt(self) -> str:
"""Document excerpt or passage original content as retrieved by Kendra."""
[docs] def get_additional_metadata(self) -> dict:
"""Document additional metadata dict.
This returns any extra metadata except these:
* result_id
* document_id
* source
* title
* excerpt
* document_attributes
"""
return {}
[docs] def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]:
"""Document attributes dict."""
return {attr.Key: attr.Value.value for attr in (self.DocumentAttributes or [])}
[docs] def to_doc(
self, page_content_formatter: Callable[["ResultItem"], str] = combined_text
) -> Document:
"""Converts this item to a Document."""
page_content = page_content_formatter(self)
metadata = self.get_additional_metadata()
metadata.update(
{
"result_id": self.Id,
"document_id": self.DocumentId,
"source": self.DocumentURI,
"title": self.get_title(),
"excerpt": self.get_excerpt(),
"document_attributes": self.get_document_attributes_dict(),
}
)
return Document(page_content=page_content, metadata=metadata)
[docs]class QueryResultItem(ResultItem):
"""A Query API result item."""
DocumentTitle: TextWithHighLights
"""The document title."""
FeedbackToken: Optional[str]
"""Identifies a particular result from a particular query."""
Format: Optional[str]
"""
If the Type is ANSWER, then format is either:
* TABLE: a table excerpt is returned in TableExcerpt;
* TEXT: a text excerpt is returned in DocumentExcerpt.
"""
Type: Optional[str]
"""Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER"""
AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
"""One or more additional attributes associated with the result."""
DocumentExcerpt: Optional[TextWithHighLights]
"""Excerpt of the document text."""
[docs] def get_title(self) -> str:
return self.DocumentTitle.Text
[docs] def get_attribute_value(self) -> str:
if not self.AdditionalAttributes:
return ""
if not self.AdditionalAttributes[0]:
return ""
else:
return self.AdditionalAttributes[0].get_value_text()
[docs] def get_excerpt(self) -> str:
if (
self.AdditionalAttributes
and self.AdditionalAttributes[0].Key == "AnswerText"
):
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ""
return excerpt
[docs] def get_additional_metadata(self) -> dict:
additional_metadata = {"type": self.Type}
return additional_metadata
[docs]class RetrieveResultItem(ResultItem):
"""A Retrieve API result item."""
DocumentTitle: Optional[str]
"""The document title."""
Content: Optional[str]
"""The content of the item."""
[docs] def get_title(self) -> str:
return self.DocumentTitle or ""
[docs] def get_excerpt(self) -> str:
return self.Content or ""
[docs]class QueryResult(BaseModel, extra=Extra.allow):
"""
Represents an Amazon Kendra Query API search result, which is composed of:
* Relevant suggested answers: either a text excerpt or table excerpt.
* Matching FAQs or question-answer pairs from your FAQ file.
* Documents including an excerpt of each document with its title.
"""
ResultItems: List[QueryResultItem]
"""The result items."""
[docs]class RetrieveResult(BaseModel, extra=Extra.allow):
"""
Represents an Amazon Kendra Retrieve API search result, which is composed of:
* relevant passages or text excerpts given an input query.
"""
QueryId: str
"""The ID of the query."""
ResultItems: List[RetrieveResultItem]
"""The result items."""
[docs]class AmazonKendraRetriever(BaseRetriever):
"""Retriever for the Amazon Kendra Index.
Args:
index_id: Kendra index id
region_name: The aws region e.g., `us-west-2`.
Falls back to the AWS_DEFAULT_REGION env variable
or the region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
top_k: Number of results to return
attribute_filter: Additional filtering of results based on metadata
See: https://docs.aws.amazon.com/kendra/latest/APIReference
page_content_formatter: generates the Document page_content
allowing access to all result item attributes. By default, it uses
the item's title and excerpt.
client: boto3 client for Kendra
user_context: Provides information about the user context
See: https://docs.aws.amazon.com/kendra/latest/APIReference
Example:
.. code-block:: python
retriever = AmazonKendraRetriever(
index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
)
"""
index_id: str
region_name: Optional[str] = None
credentials_profile_name: Optional[str] = None
top_k: int = 3
attribute_filter: Optional[Dict] = None
page_content_formatter: Callable[[ResultItem], str] = combined_text
client: Any
user_context: Optional[Dict] = None
@validator("top_k")
def validate_top_k(cls, value: int) -> int:
if value < 0:
raise ValueError(f"top_k ({value}) cannot be negative.")
return value
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("client") is not None:
return values
try:
import boto3
if values.get("credentials_profile_name"):
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values.get("region_name"):
client_params["region_name"] = values["region_name"]
values["client"] = session.client("kendra", **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
def _kendra_query(self, query: str) -> Sequence[ResultItem]:
kendra_kwargs = {
"IndexId": self.index_id,
"QueryText": query.strip(),
"PageSize": self.top_k,
}
if self.attribute_filter is not None:
kendra_kwargs["AttributeFilter"] = self.attribute_filter
if self.user_context is not None:
kendra_kwargs["UserContext"] = self.user_context
response = self.client.retrieve(**kendra_kwargs)
r_result = RetrieveResult.parse_obj(response)
if r_result.ResultItems:
return r_result.ResultItems
# Retrieve API returned 0 results, fall back to Query API
response = self.client.query(**kendra_kwargs)
q_result = QueryResult.parse_obj(response)
return q_result.ResultItems
def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]:
top_docs = [
item.to_doc(self.page_content_formatter)
for item in result_items[: self.top_k]
]
return top_docs
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Run search on Kendra index and get top k documents
Example:
.. code-block:: python
docs = retriever.get_relevant_documents('This is my query')
"""
result_items = self._kendra_query(query)
top_k_docs = self._get_top_k_docs(result_items)
return top_k_docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html
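The class docstring already shows basic construction; the sketch below (not part of the module source) additionally wires in an attribute filter and a custom page_content_formatter. The index id is a placeholder, and the filter follows the shape of the Kendra AttributeFilter API.

from langchain.retrievers import AmazonKendraRetriever

def excerpt_only(item):  # any Callable[[ResultItem], str] works
    return item.get_excerpt()

retriever = AmazonKendraRetriever(
    index_id="my-kendra-index-id",  # placeholder
    top_k=5,
    attribute_filter={
        "EqualsTo": {"Key": "_language_code", "Value": {"StringValue": "en"}}
    },
    page_content_formatter=excerpt_only,
)
docs = retriever.get_relevant_documents("How do I rotate access keys?")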
Source code for langchain.retrievers.bm25 """ BM25 Retriever without elastic search """ from __future__ import annotations from typing import Any, Callable, Dict, Iterable, List, Optional from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document [docs]def default_preprocessing_func(text: str) -> List[str]: return text.split() [docs]class BM25Retriever(BaseRetriever): """BM25 Retriever without elastic search.""" vectorizer: Any """ BM25 vectorizer.""" docs: List[Document] """ List of documents.""" k: int = 4 """ Number of documents to return.""" preprocess_func: Callable[[str], List[str]] = default_preprocessing_func """ Preprocessing function to use on the text before BM25 vectorization.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] @classmethod def from_texts( cls, texts: Iterable[str], metadatas: Optional[Iterable[dict]] = None, bm25_params: Optional[Dict[str, Any]] = None, preprocess_func: Callable[[str], List[str]] = default_preprocessing_func, **kwargs: Any, ) -> BM25Retriever: """ Create a BM25Retriever from a list of texts. Args: texts: A list of texts to vectorize. metadatas: A list of metadata dicts to associate with each text. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
"""
try:
from rank_bm25 import BM25Okapi
except ImportError:
raise ImportError(
"Could not import rank_bm25, please install with `pip install "
"rank_bm25`."
)
texts_processed = [preprocess_func(t) for t in texts]
bm25_params = bm25_params or {}
vectorizer = BM25Okapi(texts_processed, **bm25_params)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(
vectorizer=vectorizer, docs=docs, preprocess_func=preprocess_func, **kwargs
)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
bm25_params: Optional[Dict[str, Any]] = None,
preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
**kwargs: Any,
) -> BM25Retriever:
"""
Create a BM25Retriever from a list of Documents.
Args:
documents: A list of Documents to vectorize.
bm25_params: Parameters to pass to the BM25 vectorizer.
preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
"""
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts,
bm25_params=bm25_params,
metadatas=metadatas,
preprocess_func=preprocess_func,
**kwargs,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
processed_query = self.preprocess_func(query)
return_docs = self.vectorizer.get_top_n(processed_query, self.docs, n=self.k)
return return_docs
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/bm25.html
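A usage sketch (not part of the module source) showing a custom preprocess_func; extra kwargs such as k are forwarded to the retriever's fields.

from langchain.retrievers import BM25Retriever

def lowercase_tokens(text: str):
    return text.lower().split()

retriever = BM25Retriever.from_texts(
    ["Foo bar", "Bar baz", "Hello world"],
    preprocess_func=lowercase_tokens,
    k=2,
)
docs = retriever.get_relevant_documents("foo")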
Source code for langchain.retrievers.metal from typing import Any, List, Optional from pydantic import root_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document [docs]class MetalRetriever(BaseRetriever): """Retriever that uses the Metal API.""" client: Any """The Metal client to use.""" params: Optional[dict] = None """The parameters to pass to the Metal client.""" @root_validator(pre=True) def validate_client(cls, values: dict) -> dict: """Validate that the client is of the correct type.""" from metal_sdk.metal import Metal if "client" in values: client = values["client"] if not isinstance(client, Metal): raise ValueError( "Got unexpected client, should be of type metal_sdk.metal.Metal. " f"Instead, got {type(client)}" ) values["params"] = values.get("params", {}) return values def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: results = self.client.search({"text": query}, **self.params) final_results = [] for r in results["data"]: metadata = {k: v for k, v in r.items() if k != "text"} final_results.append(Document(page_content=r["text"], metadata=metadata)) return final_results
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/metal.html
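A construction sketch (not part of the module source) using the metal_sdk client type checked by the validator above; the credentials are placeholders, and the constructor signature follows the metal_sdk documentation.

from metal_sdk.metal import Metal
from langchain.retrievers import MetalRetriever

metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")  # placeholder credentials
retriever = MetalRetriever(client=metal, params={"limit": 2})
docs = retriever.get_relevant_documents("What is the meaning of life?")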
Source code for langchain.retrievers.milvus """Milvus Retriever""" import warnings from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever, Document from langchain.vectorstores.milvus import Milvus # TODO: Update to MilvusClient + Hybrid Search when available [docs]class MilvusRetriever(BaseRetriever): """Retriever that uses the Milvus API.""" embedding_function: Embeddings collection_name: str = "LangChainCollection" connection_args: Optional[Dict[str, Any]] = None consistency_level: str = "Session" search_params: Optional[dict] = None store: Milvus retriever: BaseRetriever @root_validator(pre=True) def create_retriever(cls, values: Dict) -> Dict: """Create the Milvus store and retriever.""" values["store"] = Milvus( values["embedding_function"], values["collection_name"], values["connection_args"], values["consistency_level"], ) values["retriever"] = values["store"].as_retriever( search_kwargs={"param": values["search_params"]} ) return values [docs] def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None ) -> None: """Add text to the Milvus store Args: texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
[docs]def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/milvus.html
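A construction sketch (not part of the module source). Note that the pre=True root validator above reads embedding_function, collection_name, connection_args, consistency_level, and search_params before pydantic defaults are applied, so the sketch passes them all explicitly; the connection values are placeholders for a running Milvus instance.

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import MilvusRetriever

retriever = MilvusRetriever(
    embedding_function=OpenAIEmbeddings(),
    collection_name="LangChainCollection",
    connection_args={"host": "localhost", "port": "19530"},  # placeholder
    consistency_level="Session",
    search_params=None,
)
retriever.add_texts(["Milvus stores vectors."])
docs = retriever.get_relevant_documents("What does Milvus store?")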
Source code for langchain.retrievers.svm from __future__ import annotations import concurrent.futures from typing import Any, Iterable, List, Optional import numpy as np from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever, Document [docs]def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray: """ Create an index of embeddings for a list of contexts. Args: contexts: List of contexts to embed. embeddings: Embeddings model to use. Returns: Index of embeddings. """ with concurrent.futures.ThreadPoolExecutor() as executor: return np.array(list(executor.map(embeddings.embed_query, contexts))) [docs]class SVMRetriever(BaseRetriever): """SVM Retriever. Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb """ embeddings: Embeddings """Embeddings model to use.""" index: Any """Index of embeddings.""" texts: List[str] """List of texts to index.""" k: int = 4 """Number of results to return.""" relevancy_threshold: Optional[float] = None """Threshold for relevancy.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] @classmethod def from_texts( cls, texts: List[str], embeddings: Embeddings, **kwargs: Any ) -> SVMRetriever: index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
embeddings: Embeddings,
**kwargs: Any,
) -> SVMRetriever:
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
# NOTE: metadatas are extracted but not used; SVMRetriever indexes texts only.
return cls.from_texts(texts=texts, embeddings=embeddings, **kwargs)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
try:
from sklearn import svm
except ImportError:
raise ImportError(
"Could not import scikit-learn, please install with `pip install "
"scikit-learn`."
)
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(
class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
# svm.LinearSVC in scikit-learn is non-deterministic.
# if a text is the same as a query, there is no guarantee
# the query will be in the first index.
# this performs a simple swap, this works because anything
# left of the 0 should be equivalent.
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1 : self.k + 1]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row - 1]))
return top_k_results
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html
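A usage sketch (not part of the module source); from_texts() embeds every text eagerly via create_index(), and results carry page_content only, as the code above shows.

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import SVMRetriever

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world"],
    OpenAIEmbeddings(),
    k=2,
    relevancy_threshold=0.3,  # drop weakly related results
)
docs = retriever.get_relevant_documents("foo")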
Source code for langchain.retrievers.pinecone_hybrid_search """Taken from: https://docs.pinecone.io/docs/hybrid-search""" import hashlib from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever, Document [docs]def hash_text(text: str) -> str: """Hash a text using SHA256. Args: text: Text to hash. Returns: Hashed text. """ return str(hashlib.sha256(text.encode("utf-8")).hexdigest()) [docs]def create_index( contexts: List[str], index: Any, embeddings: Embeddings, sparse_encoder: Any, ids: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None, ) -> None: """ Create a Pinecone index from a list of contexts. Modifies the index argument in-place. Args: contexts: List of contexts to embed. index: Pinecone index to use. embeddings: Embeddings model to use. sparse_encoder: Sparse encoder to use. ids: List of ids to use for the documents. metadatas: List of metadata to use for the documents. """ batch_size = 32 _iterator = range(0, len(contexts), batch_size) try: from tqdm.auto import tqdm _iterator = tqdm(_iterator) except ImportError: pass if ids is None: # create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
[docs]class PineconeHybridSearchRetriever(BaseRetriever):
"""Pinecone Hybrid Search Retriever."""
embeddings: Embeddings
"""Embeddings model to use."""
sparse_encoder: Any
"""Sparse encoder to use."""
index: Any
"""Pinecone index to use."""
top_k: int = 4
"""Number of documents to return."""
alpha: float = 0.5
"""Alpha value for hybrid search."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return the search results as Documents
return final_result
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html
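A usage sketch (not part of the module source) under several assumptions: a pre-existing Pinecone index created with the dotproduct metric for hybrid search, placeholder API credentials, and pinecone-text's BM25Encoder as the sparse encoder (its default() loads parameters fitted on MS MARCO).

import pinecone
from pinecone_text.sparse import BM25Encoder
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever

pinecone.init(api_key="YOUR_API_KEY", environment="YOUR_ENV")  # placeholders
index = pinecone.Index("hybrid-index")  # assumed existing dotproduct index

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),
    index=index,
    top_k=4,
    alpha=0.5,  # 1.0 = pure dense, 0.0 = pure sparse
)
retriever.add_texts(["foo", "bar"])  # upserts dense + sparse vectors
docs = retriever.get_relevant_documents("foo")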
Source code for langchain.retrievers.docarray from enum import Enum from typing import Any, Dict, List, Optional, Union import numpy as np from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever, Document from langchain.vectorstores.utils import maximal_marginal_relevance [docs]class SearchType(str, Enum): """Enumerator of the types of search to perform.""" similarity = "similarity" mmr = "mmr" [docs]class DocArrayRetriever(BaseRetriever): """ Retriever for DocArray Document Indices. Currently, supports 5 backends: InMemoryExactNNIndex, HnswDocumentIndex, QdrantDocumentIndex, ElasticDocIndex, and WeaviateDocumentIndex. Args: index: One of the above-mentioned index instances embeddings: Embedding model to represent text as vectors search_field: Field to consider for searching in the documents. Should be an embedding/vector/tensor. content_field: Field that represents the main content in your document schema. Will be used as a `page_content`. Everything else will go into `metadata`. search_type: Type of search to perform (similarity / mmr) filters: Filters applied for document retrieval. top_k: Number of documents to return """ index: Any embeddings: Embeddings search_field: str content_field: str search_type: SearchType = SearchType.similarity top_k: int = 1 filters: Optional[Any] = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
query_emb = np.array(self.embeddings.embed_query(query))
if self.search_type == SearchType.similarity:
results = self._similarity_search(query_emb)
elif self.search_type == SearchType.mmr:
results = self._mmr_search(query_emb)
else:
raise ValueError(
f"Search type {self.search_type} does not exist. "
f"Choose either 'similarity' or 'mmr'."
)
return results
def _search(
self, query_emb: np.ndarray, top_k: int
) -> List[Union[Dict[str, Any], Any]]:
"""
Perform a search using the query embedding and return top_k documents.
Args:
query_emb: Query represented as an embedding
top_k: Number of documents to return
Returns:
A list of top_k documents matching the query
"""
from docarray.index import ElasticDocIndex, WeaviateDocumentIndex
filter_args = {}
search_field = self.search_field
if isinstance(self.index, WeaviateDocumentIndex):
filter_args["where_filter"] = self.filters
search_field = ""
elif isinstance(self.index, ElasticDocIndex):
filter_args["query"] = self.filters
else:
filter_args["filter_query"] = self.filters
if self.filters:
query = (
self.index.build_query() # get empty query object
.find(
query=query_emb, search_field=search_field
) # add vector similarity search
.filter(**filter_args) # add filter search
.build(limit=top_k) # build the query
)
# execute the combined query and return the results
docs = self.index.execute_query(query)
if hasattr(docs, "documents"):
docs = docs.documents
docs = docs[:top_k]
else:
docs = self.index.find(
query=query_emb, search_field=search_field, limit=top_k
).documents
return docs
def _similarity_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
docs = self._search(query_emb=query_emb, top_k=self.top_k)
results = [self._docarray_to_langchain_doc(doc) for doc in docs]
return results
def _mmr_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a maximal marginal relevance (mmr) search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of diverse documents related to the query
"""
docs = self._search(query_emb=query_emb, top_k=20)
mmr_selected = maximal_marginal_relevance(
query_emb,
[
doc[self.search_field]
if isinstance(doc, dict)
else getattr(doc, self.search_field)
for doc in docs
],
k=self.top_k,
)
results = [self._docarray_to_langchain_doc(docs[idx]) for idx in mmr_selected]
return results
def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]) -> Document:
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f"Document does not contain the content field - {self.content_field}."
)
lc_doc = Document(
page_content=doc[self.content_field]
if isinstance(doc, dict)
else getattr(doc, self.content_field)
)
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if (
isinstance(value, (str, int, float, bool))
and name != self.content_field
):
lc_doc.metadata[name] = value
return lc_doc
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/docarray.html
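A self-contained sketch (not part of the module source) using DocArray's InMemoryExactNNIndex backend; the schema, dimensionality, and texts are illustrative, and the embedding field must match the embedding model's output size.

from docarray import BaseDoc
from docarray.index import InMemoryExactNNIndex
from docarray.typing import NdArray
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import DocArrayRetriever

class MyDoc(BaseDoc):
    text: str
    embedding: NdArray[1536]  # must match the embedding model's dimensionality

embeddings = OpenAIEmbeddings()
index = InMemoryExactNNIndex[MyDoc]()
index.index(
    [MyDoc(text=t, embedding=embeddings.embed_query(t)) for t in ["foo", "bar"]]
)

retriever = DocArrayRetriever(
    index=index,
    embeddings=embeddings,
    search_field="embedding",
    content_field="text",
    top_k=1,
)
docs = retriever.get_relevant_documents("foo")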
Source code for langchain.retrievers.time_weighted_retriever import datetime from copy import deepcopy from typing import Any, Dict, List, Optional, Tuple from pydantic import Field from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document from langchain.vectorstores.base import VectorStore def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float: """Get the hours passed between two datetime objects.""" return (time - ref_time).total_seconds() / 3600 [docs]class TimeWeightedVectorStoreRetriever(BaseRetriever): """Retriever that combines embedding similarity with recency in retrieving values.""" vectorstore: VectorStore """The vectorstore to store documents and determine salience.""" search_kwargs: dict = Field(default_factory=lambda: dict(k=100)) """Keyword arguments to pass to the vectorstore similarity search.""" # TODO: abstract as a queue memory_stream: List[Document] = Field(default_factory=list) """The memory_stream of documents to search through.""" decay_rate: float = Field(default=0.01) """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed).""" k: int = 4 """The maximum number of documents to retrieve in a given call.""" other_score_keys: List[str] = [] """Other keys in the metadata to factor into the score, e.g. 'importance'.""" default_salience: Optional[float] = None """The salience to assign memories not retrieved from the vector store. None assigns no salience to documents not fetched from the vector store. """ class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
[docs] def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
[docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
[docs] async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
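Putting the class to work: a usage sketch in the spirit of the library's documented example, assuming the class is exported as TimeWeightedVectorStoreRetriever from langchain.retrievers, a local faiss install, and an OPENAI_API_KEY in the environment. The FAISS constructor call follows the common in-memory docstore pattern.

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()  # requires OPENAI_API_KEY
index = faiss.IndexFlatL2(1536)  # 1536 = OpenAI embedding width
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore,
    decay_rate=0.01,  # low decay: memories stay "fresh" for a long time
    k=4,
)
retriever.add_documents([Document(page_content="hello world")])
docs = retriever.get_relevant_documents("hello")  # also bumps last_accessed_at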
216e490ba4e3-0
Source code for langchain.retrievers.web_research
import logging
import re
from typing import List, Optional

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.llms import LlamaCpp
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger(__name__)

class SearchQueries(BaseModel):
    """Search queries to run to research for the user's goal."""

    queries: List[str] = Field(
        ..., description="List of search queries to look up on Google"
    )

DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)

DEFAULT_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an assistant tasked with improving Google search \
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/web_research.html
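Each prompt takes a single question input variable, so rendering one is a plain .format call. A quick sanity check against the LLaMA-style template defined above (the question text is invented; the template is copied verbatim so the snippet stands alone):

from langchain.prompts import PromptTemplate

llama_prompt = PromptTemplate(
    input_variables=["question"],
    template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)
print(llama_prompt.format(question="How does LangChain index web pages?"))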
216e490ba4e3-1
template="""You are an assistant tasked with improving Google search \ results. Generate THREE Google search queries that are similar to \ this question. The output should be a numbered list of questions and each \ should have a question mark at the end: {question}""", ) [docs]class LineList(BaseModel): """List of questions.""" lines: List[str] = Field(description="Questions") [docs]class QuestionListOutputParser(PydanticOutputParser): """Output parser for a list of numbered questions.""" def __init__(self) -> None: super().__init__(pydantic_object=LineList) [docs] def parse(self, text: str) -> LineList: lines = re.findall(r"\d+\..*?\n", text) return LineList(lines=lines) [docs]class WebResearchRetriever(BaseRetriever): """Retriever for web research based on the Google Search API.""" # Inputs vectorstore: VectorStore = Field( ..., description="Vector store for storing web pages" ) llm_chain: LLMChain search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper") num_search_results: int = Field(1, description="Number of pages per Google search") text_splitter: RecursiveCharacterTextSplitter = Field( RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50), description="Text splitter for splitting web pages into chunks", ) url_database: List[str] = Field( default_factory=list, description="List of processed URLs" ) [docs] @classmethod def from_llm( cls, vectorstore: VectorStore,
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/web_research.html
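QuestionListOutputParser.parse is just a regex scan. Worth noting that the pattern requires a trailing newline, so a final question with no newline after it is silently dropped. A self-contained demo:

import re

text = (
    "1. What is LangChain?\n"
    "2. How does LangChain work?\n"
    "3. Who maintains LangChain?\n"
)
lines = re.findall(r"\d+\..*?\n", text)
print(lines)
# ['1. What is LangChain?\n', '2. How does LangChain work?\n',
#  '3. Who maintains LangChain?\n']
# A final question without a trailing "\n" would NOT match, since the
# pattern consumes up to and including the newline; callers may want to
# append one defensively.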
216e490ba4e3-2
        llm: BaseLLM,
        search: GoogleSearchAPIWrapper,
        prompt: Optional[BasePromptTemplate] = None,
        num_search_results: int = 1,
        text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
            chunk_size=1500, chunk_overlap=150
        ),
    ) -> "WebResearchRetriever":
        """Initialize from llm using default template.

        Args:
            vectorstore: Vector store for storing web pages
            llm: llm for search question generation
            search: GoogleSearchAPIWrapper
            prompt: prompt for generating search questions
            num_search_results: Number of pages per Google search
            text_splitter: Text splitter for splitting web pages into chunks

        Returns:
            WebResearchRetriever
        """
        if not prompt:
            QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
                default_prompt=DEFAULT_SEARCH_PROMPT,
                conditionals=[
                    (lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
                ],
            )
            prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)

        # Use chat model prompt
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            output_parser=QuestionListOutputParser(),
        )
        return cls(
            vectorstore=vectorstore,
            llm_chain=llm_chain,
            search=search,
            num_search_results=num_search_results,
            text_splitter=text_splitter,
        )

    def clean_search_query(self, query: str) -> str:
        # Some search tools (e.g., Google) will
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/web_research.html
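A usage sketch for from_llm under stated assumptions: GoogleSearchAPIWrapper reads GOOGLE_API_KEY and GOOGLE_CSE_ID from the environment, OpenAIEmbeddings needs OPENAI_API_KEY, and faiss must be installed; the FAISS constructor call is the common in-memory docstore pattern, not anything this module mandates.

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.web_research import WebResearchRetriever
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores import FAISS

# Reads GOOGLE_API_KEY and GOOGLE_CSE_ID from the environment.
search = GoogleSearchAPIWrapper()
embeddings = OpenAIEmbeddings()  # requires OPENAI_API_KEY
vectorstore = FAISS(
    embeddings.embed_query, faiss.IndexFlatL2(1536), InMemoryDocstore({}), {}
)

retriever = WebResearchRetriever.from_llm(
    vectorstore=vectorstore,
    llm=OpenAI(temperature=0),
    search=search,
)
docs = retriever.get_relevant_documents(
    "How do LLM-powered autonomous agents work?"
)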
216e490ba4e3-3
        # fail to return results if the query has a
        # leading digit: 1. "LangCh..."
        # Check if the first character is a digit
        if query[0].isdigit():
            # Find the position of the first quote
            first_quote_pos = query.find('"')
            if first_quote_pos != -1:
                # Extract the part of the string after the quote
                query = query[first_quote_pos + 1 :]
                # Remove the trailing quote if present
                if query.endswith('"'):
                    query = query[:-1]
        return query.strip()

    def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
        """Return num_search_results pages per Google search."""
        query_clean = self.clean_search_query(query)
        result = self.search.results(query_clean, num_search_results)
        return result

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Search Google for documents related to the query input.

        Args:
            query: user query

        Returns:
            Relevant documents from the various urls.
        """
        # Get search questions
        logger.info("Generating questions for Google Search ...")
        result = self.llm_chain({"question": query})
        logger.info(f"Questions for Google Search (raw): {result}")
        questions = getattr(result["text"], "lines", [])
        logger.info(f"Questions for Google Search: {questions}")

        # Get urls
        logger.info("Searching for relevant urls ...")
        urls_to_look = []
        for query in questions:
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/web_research.html
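The quote-stripping logic is easiest to see on concrete inputs. Below is the same method body as a free function for a quick demo (the demo inputs are invented):

def clean_search_query(query: str) -> str:
    # Identical logic to the method above.
    if query[0].isdigit():
        first_quote_pos = query.find('"')
        if first_quote_pos != -1:
            query = query[first_quote_pos + 1 :]
            if query.endswith('"'):
                query = query[:-1]
    return query.strip()

print(clean_search_query('1. "What is LangChain?"'))  # -> What is LangChain?
print(clean_search_query("What is LangChain?"))       # no leading digit: unchanged
print(clean_search_query("2. What is LangChain?"))    # digit but no quotes:
                                                      # numbering is left in place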
216e490ba4e3-4
            # Google search
            search_results = self.search_tool(query, self.num_search_results)
            logger.info("Searching for relevant urls ...")
            logger.info(f"Search results: {search_results}")
            for res in search_results:
                urls_to_look.append(res["link"])

        # Relevant urls
        urls = set(urls_to_look)

        # Check for any new urls that we have not processed
        new_urls = list(urls.difference(self.url_database))
        logger.info(f"New URLs to load: {new_urls}")

        # Load, split, and add new urls to vectorstore
        if new_urls:
            loader = AsyncHtmlLoader(new_urls)
            html2text = Html2TextTransformer()
            logger.info("Indexing new urls...")
            docs = loader.load()
            docs = list(html2text.transform_documents(docs))
            docs = self.text_splitter.split_documents(docs)
            self.vectorstore.add_documents(docs)
            self.url_database.extend(new_urls)

        # Search for relevant splits
        # TODO: make this async
        logger.info("Grabbing most relevant splits from urls...")
        docs = []
        for query in questions:
            docs.extend(self.vectorstore.similarity_search(query))

        # Get unique docs
        unique_documents_dict = {
            (doc.page_content, tuple(sorted(doc.metadata.items()))): doc
            for doc in docs
        }
        unique_documents = list(unique_documents_dict.values())
        return unique_documents

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        raise NotImplementedError
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/web_research.html
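The final dedup step keys each Document by its content plus its sorted, hashable metadata items, so exact duplicates collapse while the same text from a different source survives. A small sketch of the idiom (toy documents invented):

from langchain.schema import Document

docs = [
    Document(page_content="LangChain is a framework.", metadata={"source": "a.com"}),
    Document(page_content="LangChain is a framework.", metadata={"source": "a.com"}),
    Document(page_content="LangChain is a framework.", metadata={"source": "b.com"}),
]
# Later entries with the same key overwrite earlier ones; distinct metadata
# keeps a document alive.
unique = list({
    (d.page_content, tuple(sorted(d.metadata.items()))): d for d in docs
}.values())
print(len(unique))  # 2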
2ea2141e04e1-0
Source code for langchain.retrievers.merger_retriever
from typing import List

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document

class MergerRetriever(BaseRetriever):
    """Retriever that merges the results of multiple retrievers."""

    retrievers: List[BaseRetriever]
    """A list of retrievers to merge."""

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """
        Get the relevant documents for a given query.

        Args:
            query: The query to search for.

        Returns:
            A list of relevant documents.
        """
        # Merge the results of the retrievers.
        merged_documents = self.merge_documents(query, run_manager)
        return merged_documents

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """
        Asynchronously get the relevant documents for a given query.

        Args:
            query: The query to search for.

        Returns:
            A list of relevant documents.
        """
        # Merge the results of the retrievers.
        merged_documents = await self.amerge_documents(query, run_manager)
        return merged_documents

    def merge_documents(
        self, query: str, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """
        Merge the results of the retrievers.
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html
2ea2141e04e1-1
""" Merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = [ retriever.get_relevant_documents( query, callbacks=run_manager.get_child("retriever_{}".format(i + 1)) ) for i, retriever in enumerate(self.retrievers) ] # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs): for retriever, doc in zip(self.retrievers, retriever_docs): if i < len(doc): merged_documents.append(doc[i]) return merged_documents [docs] async def amerge_documents( self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun ) -> List[Document]: """ Asynchronously merge the results of the retrievers. Args: query: The query to search for. Returns: A list of merged documents. """ # Get the results of all retrievers. retriever_docs = [ await retriever.aget_relevant_documents( query, callbacks=run_manager.get_child("retriever_{}".format(i + 1)) ) for i, retriever in enumerate(self.retrievers) ] # Merge the results of the retrievers. merged_documents = [] max_docs = max(len(docs) for docs in retriever_docs) for i in range(max_docs):
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html
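The merge is a round-robin interleave across the per-retriever result lists: first hit from each retriever, then second hit from each, skipping retrievers that have run out. Stripped down to plain lists it looks like this (note that max() would raise ValueError if retrievers were empty):

# Round-robin interleave, as in merge_documents, over plain lists.
retriever_docs = [["a1", "a2", "a3"], ["b1"], ["c1", "c2"]]

merged = []
max_docs = max(len(docs) for docs in retriever_docs)  # raises if no retrievers
for i in range(max_docs):
    for docs in retriever_docs:
        if i < len(docs):
            merged.append(docs[i])

print(merged)  # ['a1', 'b1', 'c1', 'a2', 'c2', 'a3']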
2ea2141e04e1-2
            for retriever, doc in zip(self.retrievers, retriever_docs):
                if i < len(doc):
                    merged_documents.append(doc[i])
        return merged_documents
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html
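A usage sketch, assuming MergerRetriever is importable from the module shown above and that an OPENAI_API_KEY plus a faiss install are available for the two toy vector stores (the example texts are invented):

from langchain.embeddings import OpenAIEmbeddings  # requires OPENAI_API_KEY
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain.vectorstores import FAISS

emb = OpenAIEmbeddings()
r1 = FAISS.from_texts(["LangChain chains LLM calls."], emb).as_retriever()
r2 = FAISS.from_texts(["Retrievers fetch relevant documents."], emb).as_retriever()

retriever = MergerRetriever(retrievers=[r1, r2])
docs = retriever.get_relevant_documents("What does a retriever do?")
# Results interleave round-robin: r1's first hit, r2's first hit, and so on.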
848939bf8ec6-0
Source code for langchain.retrievers.remote_retriever
from typing import List, Optional

import aiohttp
import requests

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document

class RemoteLangChainRetriever(BaseRetriever):
    """Retriever for a remote LangChain API."""

    url: str
    """URL of the remote LangChain API."""
    headers: Optional[dict] = None
    """Headers to use for the request."""
    input_key: str = "message"
    """Key to use for the input in the request."""
    response_key: str = "response"
    """Key under which the documents are found in the response."""
    page_content_key: str = "page_content"
    """Key to use for the page content in the response."""
    metadata_key: str = "metadata"
    """Key to use for the metadata in the response."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        response = requests.post(
            self.url, json={self.input_key: query}, headers=self.headers
        )
        result = response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/remote_retriever.html
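The synchronous path implies a simple JSON wire format. With the default keys it looks like this (the payload contents are invented for illustration):

# Wire format implied by the code above, using the default keys.
request_body = {"message": "What is LangChain?"}  # {input_key: query}

# What the remote endpoint is expected to return:
response_body = {
    "response": [  # response_key
        {
            "page_content": "LangChain is a framework for LLM apps.",
            "metadata": {"source": "docs"},
        },
    ]
}
# Each element becomes Document(page_content=..., metadata=...).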
848939bf8ec6-1
                "POST", self.url, headers=self.headers, json={self.input_key: query}
            ) as response:
                result = await response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/remote_retriever.html
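A usage sketch against a hypothetical endpoint; the URL is a placeholder, and any service that speaks the JSON shape shown earlier will work:

from langchain.retrievers.remote_retriever import RemoteLangChainRetriever

retriever = RemoteLangChainRetriever(
    url="http://localhost:8000/retrieve",  # placeholder endpoint
    input_key="message",
    response_key="response",
)
docs = retriever.get_relevant_documents("What is LangChain?")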
102867fd5ece-0
Source code for langchain.retrievers.re_phraser
import logging
from typing import List

from langchain.callbacks.manager import (
    AsyncCallbackManagerForRetrieverRun,
    CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseRetriever, Document

logger = logging.getLogger(__name__)

# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""

# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)

class RePhraseQueryRetriever(BaseRetriever):
    """Given a user query, use an LLM to re-phrase it.
    Then, retrieve docs for the re-phrased query."""

    retriever: BaseRetriever
    llm_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        retriever: BaseRetriever,
        llm: BaseLLM,
        prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
    ) -> "RePhraseQueryRetriever":
        """Initialize from llm using default template.

        The prompt used here expects a single input: `question`

        Args:
            retriever: retriever to query documents from
            llm: llm for query generation using DEFAULT_QUERY_PROMPT
            prompt: prompt template for query generation

        Returns:
            RePhraseQueryRetriever
        """
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/re_phraser.html
102867fd5ece-1
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            retriever=retriever,
            llm_chain=llm_chain,
        )

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Get relevant documents given a user question.

        Args:
            query: user question

        Returns:
            Relevant documents for the re-phrased question
        """
        response = self.llm_chain(query, callbacks=run_manager.get_child())
        re_phrased_question = response["text"]
        logger.info(f"Re-phrased question: {re_phrased_question}")
        docs = self.retriever.get_relevant_documents(
            re_phrased_question, callbacks=run_manager.get_child()
        )
        return docs

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        raise NotImplementedError
https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/re_phraser.html
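A usage sketch: wrap any base retriever and an LLM with from_llm. Assumes an OPENAI_API_KEY and a faiss install for the toy vector store; the example texts and the chatty query are invented:

from langchain.embeddings import OpenAIEmbeddings  # requires OPENAI_API_KEY
from langchain.llms import OpenAI
from langchain.retrievers.re_phraser import RePhraseQueryRetriever
from langchain.vectorstores import FAISS

base = FAISS.from_texts(
    ["LangChain provides retrievers, chains, and agents."], OpenAIEmbeddings()
).as_retriever()

retriever = RePhraseQueryRetriever.from_llm(
    retriever=base, llm=OpenAI(temperature=0)
)
# The conversational query is first stripped down by the LLM, and the
# re-phrased version is what actually hits the base retriever.
docs = retriever.get_relevant_documents(
    "Hi there! I was wondering, could you tell me what LangChain provides?"
)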