Source code for langchain.chains.graph_qa.base

"""Question answering over a graph."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, GRAPH_QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class GraphQAChain(Chain):
    """Chain for question-answering against a graph."""

    graph: NetworkxEntityGraph = Field(exclude=True)
    entity_extraction_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        qa_prompt: BasePromptTemplate = GRAPH_QA_PROMPT,
        entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT,
        **kwargs: Any,
    ) -> GraphQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        entity_chain = LLMChain(llm=llm, prompt=entity_prompt)
        return cls(
            qa_chain=qa_chain,
            entity_extraction_chain=entity_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Extract entities, look up info and answer question."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]

        entity_string = self.entity_extraction_chain.run(question)

        _run_manager.on_text("Entities Extracted:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            entity_string, color="green", end="\n", verbose=self.verbose
        )
        entities = get_entities(entity_string)
        context = ""
        all_triplets = []
        for entity in entities:
            all_triplets.extend(self.graph.get_entity_knowledge(entity))
        context = "\n".join(all_triplets)
        _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
        _run_manager.on_text(context, color="green", end="\n", verbose=self.verbose)
        result = self.qa_chain(
            {"question": question, "context": context},
            callbacks=_run_manager.get_child(),
        )
        return {self.output_key: result[self.qa_chain.output_key]}

https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/base.html
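
A minimal usage sketch, not part of the source above: it assumes an OpenAI API key is configured in the environment, and the triples and question are illustrative placeholders.

from langchain.chains import GraphQAChain
from langchain.graphs.networkx_graph import KnowledgeTriple, NetworkxEntityGraph
from langchain.llms import OpenAI

# Build a small in-memory graph of (subject, predicate, object) triples.
graph = NetworkxEntityGraph()
graph.add_triple(KnowledgeTriple("Ada Lovelace", "collaborated with", "Charles Babbage"))
graph.add_triple(KnowledgeTriple("Charles Babbage", "designed", "the Analytical Engine"))

# from_llm builds the entity-extraction and QA sub-chains; the graph is
# passed through **kwargs to the chain constructor.
chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
print(chain.run("Who did Ada Lovelace collaborate with?"))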
Source code for langchain.chains.graph_qa.neptune_cypher

from __future__ import annotations

import re
from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
    CYPHER_QA_PROMPT,
    NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs import NeptuneGraph
from langchain.prompts.base import BasePromptTemplate

INTERMEDIATE_STEPS_KEY = "intermediate_steps"


def extract_cypher(text: str) -> str:
    """Extract Cypher code from text using Regex."""
    # The pattern to find Cypher code enclosed in triple backticks
    pattern = r"```(.*?)```"

    # Find all matches in the input text
    matches = re.findall(pattern, text, re.DOTALL)

    return matches[0] if matches else text


class NeptuneOpenCypherQAChain(Chain):
    """Chain for question-answering against a Neptune graph
    by generating openCypher statements.

    Example:
        .. code-block:: python

            chain = NeptuneOpenCypherQAChain.from_llm(
                llm=llm,
                graph=graph
            )
            response = chain.run(query)
    """

    graph: NeptuneGraph = Field(exclude=True)
    cypher_generation_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    top_k: int = 10
    return_intermediate_steps: bool = False
    """Whether or not to return the intermediate steps along with the final answer."""
    return_direct: bool = False
    """Whether or not to return the result of querying the graph directly."""

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        *,
        qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
        cypher_prompt: BasePromptTemplate = NEPTUNE_OPENCYPHER_GENERATION_PROMPT,
        **kwargs: Any,
    ) -> NeptuneOpenCypherQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)

        return cls(
            qa_chain=qa_chain,
            cypher_generation_chain=cypher_generation_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Generate Cypher statement, use it to look up in db and answer question."""
"""Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": generated_cypher}) context = self.graph.query(generated_cypher) if self.return_direct: final_result = context else: _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"context": context}) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/neptune_cypher.html
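
A small demonstration of the extract_cypher helper defined above; the sample model output is invented for illustration. Text with no fenced block is returned unchanged.

from langchain.chains.graph_qa.neptune_cypher import extract_cypher

llm_output = """Here is the query:
```
MATCH (n) RETURN count(n)
```
"""
# The first triple-backtick block is returned (with its surrounding newlines).
print(extract_cypher(llm_output))
# No backticks: the input text passes through as-is.
print(extract_cypher("MATCH (n) RETURN n LIMIT 1"))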
Source code for langchain.chains.graph_qa.kuzu

"""Question answering over a graph."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.kuzu_graph import KuzuGraph
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class KuzuQAChain(Chain):
    """Chain for question-answering against a graph by generating Cypher statements
    for Kùzu.
    """

    graph: KuzuGraph = Field(exclude=True)
    cypher_generation_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        *,
        qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
        cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT,
        **kwargs: Any,
    ) -> KuzuQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)

        return cls(
            qa_chain=qa_chain,
            cypher_generation_chain=cypher_generation_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Generate Cypher statement, use it to look up in db and answer question."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        question = inputs[self.input_key]

        generated_cypher = self.cypher_generation_chain.run(
            {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
        )

        _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            generated_cypher, color="green", end="\n", verbose=self.verbose
        )
        context = self.graph.query(generated_cypher)

        _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            str(context), color="green", end="\n", verbose=self.verbose
        )

        result = self.qa_chain(
            {"question": question, "context": context},
            callbacks=callbacks,
        )
        return {self.output_key: result[self.qa_chain.output_key]}

https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/kuzu.html
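
A minimal usage sketch, assuming the kuzu Python package is installed (a local database directory will be created); the schema, data, and question are illustrative.

import kuzu
from langchain.chains import KuzuQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import KuzuGraph

# Create a tiny local Kùzu database with one node table and one node.
db = kuzu.Database("test_db")
conn = kuzu.Connection(db)
conn.execute("CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))")
conn.execute("CREATE (:Movie {name: 'The Godfather'})")

graph = KuzuGraph(db)
chain = KuzuQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
print(chain.run("Which movies are in the database?"))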
Source code for langchain.chains.graph_qa.cypher

"""Question answering over a graph."""
from __future__ import annotations

import re
from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.neo4j_graph import Neo4jGraph
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel

INTERMEDIATE_STEPS_KEY = "intermediate_steps"


def extract_cypher(text: str) -> str:
    """Extract Cypher code from a text.

    Args:
        text: Text to extract Cypher code from.

    Returns:
        Cypher code extracted from the text.
    """
    # The pattern to find Cypher code enclosed in triple backticks
    pattern = r"```(.*?)```"

    # Find all matches in the input text
    matches = re.findall(pattern, text, re.DOTALL)

    return matches[0] if matches else text


class GraphCypherQAChain(Chain):
    """Chain for question-answering against a graph by generating Cypher statements."""

    graph: Neo4jGraph = Field(exclude=True)
    cypher_generation_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    top_k: int = 10
    """Number of results to return from the query"""
"""Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT, **kwargs: Any, ) -> GraphCypherQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]:
        """Generate Cypher statement, use it to look up in db and answer question."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        question = inputs[self.input_key]

        intermediate_steps: List = []

        generated_cypher = self.cypher_generation_chain.run(
            {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
        )

        # Extract Cypher code if it is wrapped in backticks
        generated_cypher = extract_cypher(generated_cypher)

        _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            generated_cypher, color="green", end="\n", verbose=self.verbose
        )

        intermediate_steps.append({"query": generated_cypher})

        # Retrieve and limit the number of results
        context = self.graph.query(generated_cypher)[: self.top_k]

        if self.return_direct:
            final_result = context
        else:
            _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
            _run_manager.on_text(
                str(context), color="green", end="\n", verbose=self.verbose
            )

            intermediate_steps.append({"context": context})

            result = self.qa_chain(
                {"question": question, "context": context},
                callbacks=callbacks,
            )
            final_result = result[self.qa_chain.output_key]

        chain_result: Dict[str, Any] = {self.output_key: final_result}
        if self.return_intermediate_steps:
            chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps

        return chain_result

https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/cypher.html
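
A minimal usage sketch, assuming a running Neo4j instance; the connection details and the question are placeholders.

from langchain.chains import GraphCypherQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import Neo4jGraph

# Hypothetical connection values; point these at your own Neo4j instance.
graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")

chain = GraphCypherQAChain.from_llm(
    ChatOpenAI(temperature=0),
    graph=graph,
    top_k=5,                         # caps how many query rows reach the QA step
    return_intermediate_steps=True,
    verbose=True,
)
out = chain("Who acted in Top Gun?")
print(out["result"])
for step in out["intermediate_steps"]:
    print(step)  # {"query": ...} followed by {"context": ...}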
Source code for langchain.chains.graph_qa.sparql

"""
Question answering over an RDF or OWL graph using SPARQL.
"""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
    SPARQL_GENERATION_SELECT_PROMPT,
    SPARQL_GENERATION_UPDATE_PROMPT,
    SPARQL_INTENT_PROMPT,
    SPARQL_QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.rdf_graph import RdfGraph
from langchain.prompts.base import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class GraphSparqlQAChain(Chain):
    """
    Chain for question-answering against an RDF or OWL graph by generating
    SPARQL statements.
    """

    graph: RdfGraph = Field(exclude=True)
    sparql_generation_select_chain: LLMChain
    sparql_generation_update_chain: LLMChain
    sparql_intent_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        *,
        qa_prompt: BasePromptTemplate = SPARQL_QA_PROMPT,
        sparql_select_prompt: BasePromptTemplate = SPARQL_GENERATION_SELECT_PROMPT,
        sparql_update_prompt: BasePromptTemplate = SPARQL_GENERATION_UPDATE_PROMPT,
        sparql_intent_prompt: BasePromptTemplate = SPARQL_INTENT_PROMPT,
        **kwargs: Any,
    ) -> GraphSparqlQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
        sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
        sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)

        return cls(
            qa_chain=qa_chain,
            sparql_generation_select_chain=sparql_generation_select_chain,
            sparql_generation_update_chain=sparql_generation_update_chain,
            sparql_intent_chain=sparql_intent_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """
        Generate SPARQL query, use it to retrieve a response from the gdb and answer
        the question.
        """
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        prompt = inputs[self.input_key]
        _intent = self.sparql_intent_chain.run({"prompt": prompt}, callbacks=callbacks)
        intent = _intent.strip()

        if "SELECT" not in intent and "UPDATE" not in intent:
            raise ValueError(
                "I am sorry, but this prompt seems to fit none of the currently "
                "supported SPARQL query types, i.e., SELECT and UPDATE."
            )
        elif intent.find("SELECT") < intent.find("UPDATE"):
            sparql_generation_chain = self.sparql_generation_select_chain
            intent = "SELECT"
        else:
            sparql_generation_chain = self.sparql_generation_update_chain
            intent = "UPDATE"

        _run_manager.on_text("Identified intent:", end="\n", verbose=self.verbose)
        _run_manager.on_text(intent, color="green", end="\n", verbose=self.verbose)

        generated_sparql = sparql_generation_chain.run(
            {"prompt": prompt, "schema": self.graph.get_schema}, callbacks=callbacks
        )

        _run_manager.on_text("Generated SPARQL:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            generated_sparql, color="green", end="\n", verbose=self.verbose
        )

        if intent == "SELECT":
            context = self.graph.query(generated_sparql)

            _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
            _run_manager.on_text(
                str(context), color="green", end="\n", verbose=self.verbose
            )
            result = self.qa_chain(
                {"prompt": prompt, "context": context},
                callbacks=callbacks,
            )
            res = result[self.qa_chain.output_key]
        elif intent == "UPDATE":
            self.graph.update(generated_sparql)
            res = "Successfully inserted triples into the graph."
        else:
            raise ValueError("Unsupported SPARQL query type.")

        return {self.output_key: res}

https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/sparql.html
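
A minimal usage sketch, assuming network access to a public RDF file and write access to a local copy; source and prompts are illustrative. A question is routed to the SELECT branch, while an imperative prompt is routed to the UPDATE branch.

from langchain.chains import GraphSparqlQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import RdfGraph

# local_copy is where UPDATE results are persisted.
graph = RdfGraph(
    source_file="http://www.w3.org/People/Berners-Lee/card",
    standard="rdf",
    local_copy="test.ttl",
)

chain = GraphSparqlQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
# SELECT intent: generate a query, run it, and answer from the results.
print(chain.run("What is Tim Berners-Lee's work homepage?"))
# UPDATE intent: generate an update statement and apply it to the graph.
chain.run("Save that the person with the name 'Timothy Berners-Lee' has a work homepage at 'http://www.w3.org/foo/bar/'")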
Source code for langchain.chains.graph_qa.hugegraph

"""Question answering over a graph."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import (
    CYPHER_QA_PROMPT,
    GREMLIN_GENERATION_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.graphs.hugegraph import HugeGraph
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class HugeGraphQAChain(Chain):
    """Chain for question-answering against a graph by generating gremlin statements."""

    graph: HugeGraph = Field(exclude=True)
    gremlin_generation_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        *,
        qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT,
        gremlin_prompt: BasePromptTemplate = GREMLIN_GENERATION_PROMPT,
        **kwargs: Any,
    ) -> HugeGraphQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        gremlin_generation_chain = LLMChain(llm=llm, prompt=gremlin_prompt)

        return cls(
            qa_chain=qa_chain,
            gremlin_generation_chain=gremlin_generation_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Generate gremlin statement, use it to look up in db and answer question."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        question = inputs[self.input_key]

        generated_gremlin = self.gremlin_generation_chain.run(
            {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks
        )

        _run_manager.on_text("Generated gremlin:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            generated_gremlin, color="green", end="\n", verbose=self.verbose
        )

        context = self.graph.query(generated_gremlin)

        _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            str(context), color="green", end="\n", verbose=self.verbose
        )

        result = self.qa_chain(
            {"question": question, "context": context},
            callbacks=callbacks,
        )
        return {self.output_key: result[self.qa_chain.output_key]}

https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/hugegraph.html
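
A minimal usage sketch, assuming a HugeGraph server is reachable; all connection values are hypothetical placeholders.

from langchain.chains import HugeGraphQAChain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import HugeGraph

# Hypothetical credentials and address for a local HugeGraph server.
graph = HugeGraph(
    username="admin",
    password="admin",
    address="localhost",
    port=8080,
    graph="hugegraph",
)
chain = HugeGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
print(chain.run("Who is Al Pacino's co-star?"))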
Source code for langchain.chains.conversational_retrieval.base

"""Chain for chatting with a vector database."""
from __future__ import annotations

import inspect
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.schema import BasePromptTemplate, BaseRetriever, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage
from langchain.vectorstores.base import VectorStore

# Depending on the memory type and configuration, the chat history format may differ.
# This needs to be consolidated.
CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage]

_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}


def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
    buffer = ""
    for dialogue_turn in chat_history:
        if isinstance(dialogue_turn, BaseMessage):
            role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ")
            buffer += f"\n{role_prefix}{dialogue_turn.content}"
        elif isinstance(dialogue_turn, tuple):
            human = "Human: " + dialogue_turn[0]
            ai = "Assistant: " + dialogue_turn[1]
            buffer += "\n" + "\n".join([human, ai])
        else:
            raise ValueError(
                f"Unsupported chat history format: {type(dialogue_turn)}."
                f" Full chat history: {chat_history} "
            )
    return buffer


class BaseConversationalRetrievalChain(Chain):
    """Chain for chatting with an index."""

    combine_docs_chain: BaseCombineDocumentsChain
    """The chain used to combine any retrieved documents."""
    question_generator: LLMChain
    """The chain used to generate a new question for the sake of retrieval.
    This chain will take in the current question (with variable `question`)
    and any chat history (with variable `chat_history`) and will produce
    a new standalone question to be used later on."""
    output_key: str = "answer"
    """The output key to return the final answer of this chain in."""
    rephrase_question: bool = True
    """Whether or not to pass the new generated question to the combine_docs_chain.
    If True, will pass the new generated question along.
    If False, will only use the new generated question for retrieval and pass the
    original question along to the combine_docs_chain."""
    return_source_documents: bool = False
    """Return the retrieved source documents as part of the final result."""
    return_generated_question: bool = False
    """Return the generated question as part of the final result."""
    get_chat_history: Optional[Callable[[List[CHAT_TURN_TYPE]], str]] = None
    """An optional function to get a string of the chat history.
"""An optional function to get a string of the chat history. If None is provided, will use a default.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True allow_population_by_field_name = True @property def input_keys(self) -> List[str]: """Input keys.""" return ["question", "chat_history"] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] if self.return_generated_question: _output_keys = _output_keys + ["generated_question"] return _output_keys @abstractmethod def _get_docs( self, question: str, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs["question"] get_chat_history = self.get_chat_history or _get_chat_history chat_history_str = get_chat_history(inputs["chat_history"]) if chat_history_str: callbacks = _run_manager.get_child() new_question = self.question_generator.run( question=question, chat_history=chat_history_str, callbacks=callbacks ) else:
            new_question = question
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._get_docs).parameters
        )
        if accepts_run_manager:
            docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = self._get_docs(new_question, inputs)  # type: ignore[call-arg]
        new_inputs = inputs.copy()
        if self.rephrase_question:
            new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = self.combine_docs_chain.run(
            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
        )
        output: Dict[str, Any] = {self.output_key: answer}
        if self.return_source_documents:
            output["source_documents"] = docs
        if self.return_generated_question:
            output["generated_question"] = new_question
        return output

    @abstractmethod
    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        if chat_history_str:
            callbacks = _run_manager.get_child()
            new_question = await self.question_generator.arun(
                question=question, chat_history=chat_history_str, callbacks=callbacks
            )
        else:
            new_question = question
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._aget_docs).parameters
        )
        if accepts_run_manager:
            docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = await self._aget_docs(new_question, inputs)  # type: ignore[call-arg]
        new_inputs = inputs.copy()
        if self.rephrase_question:
            new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = await self.combine_docs_chain.arun(
            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
        )
        output: Dict[str, Any] = {self.output_key: answer}
        if self.return_source_documents:
            output["source_documents"] = docs
        if self.return_generated_question:
            output["generated_question"] = new_question
        return output

    def save(self, file_path: Union[Path, str]) -> None:
        if self.get_chat_history:
            raise ValueError("Chain not saveable when `get_chat_history` is not None.")
        super().save(file_path)


class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
    """Chain for having a conversation based on retrieved documents.

    This chain takes in chat history (a list of messages) and new questions,
    and then returns an answer to that question.
    The algorithm for this chain consists of three parts:
    1. Use the chat history and the new question to create a "standalone question".
    This is done so that this question can be passed into the retrieval step to fetch
    relevant documents. If only the new question was passed in, then relevant context
    may be lacking. If the whole conversation was passed into retrieval, there may
    be unnecessary information there that would distract from retrieval.

    2. This new question is passed to the retriever and relevant documents are
    returned.

    3. The retrieved documents are passed to an LLM along with either the new question
    (default behavior) or the original question and chat history to generate a final
    response.

    Example:
        .. code-block:: python

            from langchain.chains import (
                StuffDocumentsChain, LLMChain, ConversationalRetrievalChain
            )
            from langchain.prompts import PromptTemplate
            from langchain.llms import OpenAI

            combine_docs_chain = StuffDocumentsChain(...)
            vectorstore = ...
            retriever = vectorstore.as_retriever()

            # This controls how the standalone question is generated.
            # Should take `chat_history` and `question` as input variables.
            template = (
                "Combine the chat history and follow up question into "
                "a standalone question. Chat History: {chat_history}"
                "Follow up question: {question}"
            )
            prompt = PromptTemplate.from_template(template)
            llm = OpenAI()
            question_generator_chain = LLMChain(llm=llm, prompt=prompt)
            chain = ConversationalRetrievalChain(
                combine_docs_chain=combine_docs_chain,
                retriever=retriever,
                question_generator=question_generator_chain,
            )
    """

    retriever: BaseRetriever
    """Retriever to use to fetch documents."""
    max_tokens_limit: Optional[int] = None
    """If set, enforces that the documents returned are less than this limit.
    This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain."""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)

        if self.max_tokens_limit and isinstance(
            self.combine_docs_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_docs_chain.llm_chain.llm.get_num_tokens(doc.page_content)
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]

        return docs[:num_docs]

    def _get_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        docs = self.retriever.get_relevant_documents(
            question, callbacks=run_manager.get_child()
        )
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        docs = await self.retriever.aget_relevant_documents(
            question, callbacks=run_manager.get_child()
        )
        return self._reduce_tokens_below_limit(docs)

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        retriever: BaseRetriever,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        chain_type: str = "stuff",
        verbose: bool = False,
        condense_question_llm: Optional[BaseLanguageModel] = None,
        combine_docs_chain_kwargs: Optional[Dict] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Convenience method to load chain from LLM and retriever.

        This provides some logic to create the `question_generator` chain
        as well as the combine_docs_chain.

        Args:
            llm: The default language model to use at every part of this chain
                (eg in both the question generation and the answering)
            retriever: The retriever to use to fetch relevant documents from.
            condense_question_prompt: The prompt to use to condense the chat history
                and new question into a standalone question.
            chain_type: The chain type to use to create the combine_docs_chain, will
                be sent to `load_qa_chain`.
            verbose: Verbosity flag for logging to stdout.
            condense_question_llm: The language model to use for condensing the chat
                history and new question into a standalone question. If none is
                provided, will default to `llm`.
            combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
                when constructing the combine_docs_chain.
            callbacks: Callbacks to pass to all subchains.
            **kwargs: Additional parameters to pass when initializing
                ConversationalRetrievalChain
        """
        combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            verbose=verbose,
            callbacks=callbacks,
            **combine_docs_chain_kwargs,
        )

        _llm = condense_question_llm or llm
        condense_question_chain = LLMChain(
            llm=_llm,
            prompt=condense_question_prompt,
            verbose=verbose,
            callbacks=callbacks,
        )
        return cls(
            retriever=retriever,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            callbacks=callbacks,
            **kwargs,
        )


class ChatVectorDBChain(BaseConversationalRetrievalChain):
    """Chain for chatting with a vector database."""

    vectorstore: VectorStore = Field(alias="vectorstore")
    top_k_docs_for_context: int = 4
    search_kwargs: dict = Field(default_factory=dict)

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`ChatVectorDBChain` is deprecated - "
            "please use `from langchain.chains import ConversationalRetrievalChain`"
        )
        return values

    def _get_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        full_kwargs = {**self.search_kwargs, **vectordbkwargs}
        return self.vectorstore.similarity_search(
            question, k=self.top_k_docs_for_context, **full_kwargs
        )

    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        raise NotImplementedError("ChatVectorDBChain does not support async")

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        chain_type: str = "stuff",
        combine_docs_chain_kwargs: Optional[Dict] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            callbacks=callbacks,
            **combine_docs_chain_kwargs,
        )
        condense_question_chain = LLMChain(
            llm=llm, prompt=condense_question_prompt, callbacks=callbacks
        )
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            callbacks=callbacks,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
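
A minimal usage sketch, assuming an OpenAI API key and the faiss package; the two-document corpus is a stand-in for a real index.

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(
    ["Harrison worked at Kensho.", "Kensho is a fintech company."],
    OpenAIEmbeddings(),
)
qa = ConversationalRetrievalChain.from_llm(
    ChatOpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),
    return_source_documents=True,
)

chat_history = []
result = qa({"question": "Where did Harrison work?", "chat_history": chat_history})
chat_history.append(("Where did Harrison work?", result["answer"]))

# The follow-up is condensed with the history into a standalone question
# before retrieval, so the pronoun "it" can resolve to Kensho.
result = qa({"question": "What kind of company is it?", "chat_history": chat_history})
print(result["answer"])
print(result["source_documents"])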
Source code for langchain.chains.hyde.base

"""Hypothetical Document Embeddings.

https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations

from typing import Any, Dict, List, Optional

import numpy as np
from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
from langchain.embeddings.base import Embeddings
from langchain.schema.language_model import BaseLanguageModel


class HypotheticalDocumentEmbedder(Chain, Embeddings):
    """Generate hypothetical document for query, and then embed that.

    Based on https://arxiv.org/abs/2212.10496
    """

    base_embeddings: Embeddings
    llm_chain: LLMChain

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Output keys for Hyde's LLM chain."""
        return self.llm_chain.output_keys

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)

    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))
    def embed_query(self, text: str) -> List[float]:
        """Generate a hypothetical document and embed it."""
        var_name = self.llm_chain.input_keys[0]
        result = self.llm_chain.generate([{var_name: text}])
        documents = [generation.text for generation in result.generations[0]]
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Call the internal llm chain."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        return self.llm_chain(inputs, callbacks=_run_manager.get_child())

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        base_embeddings: Embeddings,
        prompt_key: str,
        **kwargs: Any,
    ) -> HypotheticalDocumentEmbedder:
        """Load and use LLMChain for a specific prompt key."""
        prompt = PROMPT_MAP[prompt_key]
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        return "hyde_chain"

https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html
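
A minimal usage sketch, assuming an OpenAI API key; "web_search" is one of the prompt keys expected in PROMPT_MAP.

from langchain.chains import HypotheticalDocumentEmbedder
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI

embedder = HypotheticalDocumentEmbedder.from_llm(
    llm=OpenAI(n=4, best_of=4),        # sample several hypothetical documents
    base_embeddings=OpenAIEmbeddings(),
    prompt_key="web_search",
)
# The query is first answered hypothetically; each answer is embedded, and
# combine_embeddings() averages the vectors into one query embedding.
vector = embedder.embed_query("Where is the Taj Mahal?")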
Source code for langchain.chains.llm_math.base

"""Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations

import math
import re
import warnings
from typing import Any, Dict, List, Optional

import numexpr
from pydantic import Extra, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_math.prompt import PROMPT
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel


class LLMMathChain(Chain):
    """Chain that interprets a prompt and executes python code to do math.

    Example:
        .. code-block:: python

            from langchain import LLMMathChain, OpenAI
            llm_math = LLMMathChain.from_llm(OpenAI())
    """

    llm_chain: LLMChain
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated] LLM wrapper to use."""
    prompt: BasePromptTemplate = PROMPT
    """[Deprecated] Prompt to use to translate to python if necessary."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
            warnings.warn(
if "llm" in values: warnings.warn( "Directly instantiating an LLMMathChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: prompt = values.get("prompt", PROMPT) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] def _evaluate_expression(self, expression: str) -> str: try: local_dict = {"pi": math.pi, "e": math.e} output = str( numexpr.evaluate( expression.strip(), global_dict={}, # restrict access to globals local_dict=local_dict, # add common mathematical functions ) ) except Exception as e: raise ValueError( f'LLMMathChain._evaluate("{expression}") raised error: {e}.' " Please try again with a valid numerical expression" ) # Remove any leading and trailing brackets from the output return re.sub(r"^\[|\]$", "", output) def _process_llm_result( self, llm_output: str, run_manager: CallbackManagerForChainRun ) -> Dict[str, str]:
        run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            run_manager.on_text("\nAnswer: ", verbose=self.verbose)
            run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            answer = llm_output
        elif "Answer:" in llm_output:
            answer = "Answer: " + llm_output.split("Answer:")[-1]
        else:
            raise ValueError(f"unknown format from LLM: {llm_output}")
        return {self.output_key: answer}

    async def _aprocess_llm_result(
        self,
        llm_output: str,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> Dict[str, str]:
        await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
        llm_output = llm_output.strip()
        text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
        if text_match:
            expression = text_match.group(1)
            output = self._evaluate_expression(expression)
            await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
            await run_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif llm_output.startswith("Answer:"):
            answer = llm_output
        elif "Answer:" in llm_output:
            answer = "Answer: " + llm_output.split("Answer:")[-1]
        else:
            raise ValueError(f"unknown format from LLM: {llm_output}")
        return {self.output_key: answer}

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        _run_manager.on_text(inputs[self.input_key])
        llm_output = self.llm_chain.predict(
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=_run_manager.get_child(),
        )
        return self._process_llm_result(llm_output, _run_manager)

    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        await _run_manager.on_text(inputs[self.input_key])
        llm_output = await self.llm_chain.apredict(
            question=inputs[self.input_key],
            stop=["```output"],
            callbacks=_run_manager.get_child(),
        )
        return await self._aprocess_llm_result(llm_output, _run_manager)

    @property
    def _chain_type(self) -> str:
        return "llm_math_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate = PROMPT,
        **kwargs: Any,
    ) -> LLMMathChain:
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
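
A minimal usage sketch, assuming an OpenAI API key and the numexpr package; the exact printed value depends on the model output.

from langchain.chains import LLMMathChain
from langchain.llms import OpenAI

llm_math = LLMMathChain.from_llm(OpenAI(temperature=0), verbose=True)
# The LLM is prompted to emit a ```text ... ``` block containing a numexpr
# expression, which _evaluate_expression() then evaluates with globals blocked.
print(llm_math.run("What is 13 raised to the 0.3432 power?"))
# -> "Answer: 2.4116..."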
Source code for langchain.chains.flare.base

from __future__ import annotations

import re
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Sequence, Tuple

import numpy as np
from pydantic import Field

from langchain.callbacks.manager import (
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.flare.prompts import (
    PROMPT,
    QUESTION_GENERATOR_PROMPT,
    FinishedOutputParser,
)
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.schema import BasePromptTemplate, BaseRetriever, Generation
from langchain.schema.language_model import BaseLanguageModel


class _ResponseChain(LLMChain):
    """Base class for chains that generate responses."""

    prompt: BasePromptTemplate = PROMPT

    @property
    def input_keys(self) -> List[str]:
        return self.prompt.input_variables

    def generate_tokens_and_log_probs(
        self,
        _input: Dict[str, Any],
        *,
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[Sequence[str], Sequence[float]]:
        llm_result = self.generate([_input], run_manager=run_manager)
        return self._extract_tokens_and_log_probs(llm_result.generations[0])

    @abstractmethod
    def _extract_tokens_and_log_probs(
        self, generations: List[Generation]
    ) -> Tuple[Sequence[str], Sequence[float]]:
        """Extract tokens and log probs from response."""


class _OpenAIResponseChain(_ResponseChain):
    """Chain that generates responses from user input and context."""

    llm: OpenAI = Field(
        default_factory=lambda: OpenAI(
            max_tokens=32, model_kwargs={"logprobs": 1}, temperature=0
        )
    )

    def _extract_tokens_and_log_probs(
        self, generations: List[Generation]
    ) -> Tuple[Sequence[str], Sequence[float]]:
        tokens = []
        log_probs = []
        for gen in generations:
            if gen.generation_info is None:
                raise ValueError
            tokens.extend(gen.generation_info["logprobs"]["tokens"])
            log_probs.extend(gen.generation_info["logprobs"]["token_logprobs"])
        return tokens, log_probs


class QuestionGeneratorChain(LLMChain):
    """Chain that generates questions from uncertain spans."""

    prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT
    """Prompt template for the chain."""

    @property
    def input_keys(self) -> List[str]:
        """Input keys for the chain."""
        return ["user_input", "context", "response"]


def _low_confidence_spans(
    tokens: Sequence[str],
    log_probs: Sequence[float],
    min_prob: float,
    min_token_gap: int,
    num_pad_tokens: int,
) -> List[str]:
    _low_idx = np.where(np.exp(log_probs) < min_prob)[0]
    low_idx = [i for i in _low_idx if re.search(r"\w", tokens[i])]
    if len(low_idx) == 0:
        return []
    spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]]
    for i, idx in enumerate(low_idx[1:]):
        end = idx + num_pad_tokens + 1
        if idx - low_idx[i] < min_token_gap:
            spans[-1][1] = end
        else:
            spans.append([idx, end])
    return ["".join(tokens[start:end]) for start, end in spans]


class FlareChain(Chain):
    """Chain that combines a retriever, a question generator,
    and a response generator."""

    question_generator_chain: QuestionGeneratorChain
    """Chain that generates questions from uncertain spans."""
    response_chain: _ResponseChain = Field(default_factory=_OpenAIResponseChain)
    """Chain that generates responses from user input and context."""
    output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser)
    """Parser that determines whether the chain is finished."""
    retriever: BaseRetriever
    """Retriever that retrieves relevant documents from a user input."""
    min_prob: float = 0.2
    """Minimum probability for a token to be considered low confidence."""
    min_token_gap: int = 5
    """Minimum number of tokens between two low confidence spans."""
    num_pad_tokens: int = 2
    """Number of tokens to pad around a low confidence span."""
    max_iter: int = 10
    """Maximum number of iterations."""
    start_with_retrieval: bool = True
    """Whether to start with retrieval."""

    @property
    def input_keys(self) -> List[str]:
        """Input keys for the chain."""
        return ["user_input"]

    @property
    def output_keys(self) -> List[str]:
        """Output keys for the chain."""
        return ["response"]

    def _do_generation(
        self,
        questions: List[str],
        user_input: str,
        response: str,
        _run_manager: CallbackManagerForChainRun,
    ) -> Tuple[str, bool]:
        callbacks = _run_manager.get_child()
        docs = []
        for question in questions:
            docs.extend(self.retriever.get_relevant_documents(question))
        context = "\n\n".join(d.page_content for d in docs)
        result = self.response_chain.predict(
            user_input=user_input,
            context=context,
            response=response,
            callbacks=callbacks,
        )
        marginal, finished = self.output_parser.parse(result)
        return marginal, finished

    def _do_retrieval(
        self,
        low_confidence_spans: List[str],
        _run_manager: CallbackManagerForChainRun,
        user_input: str,
        response: str,
        initial_response: str,
    ) -> Tuple[str, bool]:
        question_gen_inputs = [
            {
                "user_input": user_input,
                "current_response": initial_response,
                "uncertain_span": span,
            }
            for span in low_confidence_spans
        ]
        callbacks = _run_manager.get_child()
        question_gen_outputs = self.question_generator_chain.apply(
            question_gen_inputs, callbacks=callbacks
        )
        questions = [
            output[self.question_generator_chain.output_keys[0]]
            for output in question_gen_outputs
        ]
        _run_manager.on_text(
            f"Generated Questions: {questions}", color="yellow", end="\n"
        )
        return self._do_generation(questions, user_input, response, _run_manager)

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()

        user_input = inputs[self.input_keys[0]]

        response = ""

        for i in range(self.max_iter):
            _run_manager.on_text(
                f"Current Response: {response}", color="blue", end="\n"
            )
            _input = {"user_input": user_input, "context": "", "response": response}
            tokens, log_probs = self.response_chain.generate_tokens_and_log_probs(
                _input, run_manager=_run_manager
            )
            low_confidence_spans = _low_confidence_spans(
                tokens,
                log_probs,
                self.min_prob,
                self.min_token_gap,
                self.num_pad_tokens,
            )
            initial_response = response.strip() + " " + "".join(tokens)
            if not low_confidence_spans:
                response = initial_response
                final_response, finished = self.output_parser.parse(response)
                if finished:
                    return {self.output_keys[0]: final_response}
                continue

            marginal, finished = self._do_retrieval(
                low_confidence_spans,
                _run_manager,
                user_input,
                response,
                initial_response,
            )
            response = response.strip() + " " + marginal
            if finished:
                break
        return {self.output_keys[0]: response}

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any
    ) -> FlareChain:
        """Creates a FlareChain from a language model.

        Args:
            llm: Language model to use.
            max_generation_len: Maximum length of the generated response.
            **kwargs: Additional arguments to pass to the constructor.

        Returns:
            FlareChain class with the given language model.
        """
        question_gen_chain = QuestionGeneratorChain(llm=llm)
        response_llm = OpenAI(
            max_tokens=max_generation_len, model_kwargs={"logprobs": 1}, temperature=0
        )
        response_chain = _OpenAIResponseChain(llm=response_llm)

        return cls(
            question_generator_chain=question_gen_chain,
            response_chain=response_chain,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
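
A minimal usage sketch, assuming an OpenAI API key and the faiss package; the corpus, min_prob, and max_generation_len values are illustrative.

from langchain.chains import FlareChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Tiny stand-in corpus; any BaseRetriever works here.
retriever = FAISS.from_texts(
    ["FLARE retrieves actively whenever the model is uncertain about a span."],
    OpenAIEmbeddings(),
).as_retriever()

flare = FlareChain.from_llm(
    ChatOpenAI(temperature=0),   # used for question generation
    retriever=retriever,
    max_generation_len=164,      # max_tokens of the logprob-emitting response LLM
    min_prob=0.3,                # tokens below this probability trigger retrieval
)
print(flare.run("Explain in one sentence how FLARE decides when to retrieve."))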
Source code for langchain.chains.flare.prompts

from typing import Tuple

from langchain.prompts import PromptTemplate
from langchain.schema import BaseOutputParser


class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
    """Output parser that checks if the output is finished."""

    finished_value: str = "FINISHED"
    """Value that indicates the output is finished."""

    def parse(self, text: str) -> Tuple[str, bool]:
        cleaned = text.strip()
        finished = self.finished_value in cleaned
        return cleaned.replace(self.finished_value, ""), finished


PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.

>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}\
"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE,
    input_variables=["user_input", "context", "response"],
)

QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:

>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}

The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""

QUESTION_GENERATOR_PROMPT = PromptTemplate(
    template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
    input_variables=["user_input", "current_response", "uncertain_span"],
)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/prompts.html
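
A small demonstration of FinishedOutputParser; the input string is invented for illustration.

from langchain.chains.flare.prompts import FinishedOutputParser

parser = FinishedOutputParser()
text, finished = parser.parse("Paris is the capital of France. FINISHED")
print(repr(text))   # 'Paris is the capital of France. '  (marker stripped)
print(finished)     # True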
Source code for langchain.chains.llm_checker.base

"""Chain for question-answering with self-verification."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.prompt import (
    CHECK_ASSERTIONS_PROMPT,
    CREATE_DRAFT_ANSWER_PROMPT,
    LIST_ASSERTIONS_PROMPT,
    REVISED_ANSWER_PROMPT,
)
from langchain.chains.sequential import SequentialChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel


def _load_question_to_checked_assertions_chain(
    llm: BaseLanguageModel,
    create_draft_answer_prompt: PromptTemplate,
    list_assertions_prompt: PromptTemplate,
    check_assertions_prompt: PromptTemplate,
    revised_answer_prompt: PromptTemplate,
) -> SequentialChain:
    create_draft_answer_chain = LLMChain(
        llm=llm,
        prompt=create_draft_answer_prompt,
        output_key="statement",
    )
    list_assertions_chain = LLMChain(
        llm=llm,
        prompt=list_assertions_prompt,
        output_key="assertions",
    )
    check_assertions_chain = LLMChain(
        llm=llm,
        prompt=check_assertions_prompt,
        output_key="checked_assertions",
    )
    revised_answer_chain = LLMChain(
        llm=llm,
        prompt=revised_answer_prompt,
        output_key="revised_statement",
    )
    chains = [
        create_draft_answer_chain,
) chains = [ create_draft_answer_chain, list_assertions_chain, check_assertions_chain, revised_answer_chain, ] question_to_checked_assertions_chain = SequentialChain( chains=chains, input_variables=["question"], output_variables=["revised_statement"], verbose=True, ) return question_to_checked_assertions_chain [docs]class LLMCheckerChain(Chain): """Chain for question-answering with self-verification. Example: .. code-block:: python from langchain import OpenAI, LLMCheckerChain llm = OpenAI(temperature=0.7) checker_chain = LLMCheckerChain.from_llm(llm) """ question_to_checked_assertions_chain: SequentialChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT """[Deprecated]""" list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT """[Deprecated]""" check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT """[Deprecated]""" revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT """[Deprecated] Prompt to use when revising the answer.""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
f3a9bed8df3d-2
if "llm" in values: warnings.warn( "Directly instantiating an LLMCheckerChain with an llm is deprecated. " "Please instantiate with question_to_checked_assertions_chain " "or using the from_llm class method." ) if ( "question_to_checked_assertions_chain" not in values and values["llm"] is not None ): question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain( values["llm"], values.get( "create_draft_answer_prompt", CREATE_DRAFT_ANSWER_PROMPT ), values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT), values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT), ) ) values[ "question_to_checked_assertions_chain" ] = question_to_checked_assertions_chain return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] output = self.question_to_checked_assertions_chain(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
f3a9bed8df3d-3
output = self.question_to_checked_assertions_chain( {"question": question}, callbacks=_run_manager.get_child() ) return {self.output_key: output["revised_statement"]} @property def _chain_type(self) -> str: return "llm_checker_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT, list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT, check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT, revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT, **kwargs: Any, ) -> LLMCheckerChain: question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain( llm, create_draft_answer_prompt, list_assertions_prompt, check_assertions_prompt, revised_answer_prompt, ) ) return cls( question_to_checked_assertions_chain=question_to_checked_assertions_chain, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
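A usage sketch for the chain above, assuming an OpenAI API key is configured; from_llm wires up the draft -> list assertions -> check -> revise pipeline built by _load_question_to_checked_assertions_chain:

.. code-block:: python

    from langchain.chains import LLMCheckerChain
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0.7)
    checker = LLMCheckerChain.from_llm(llm, verbose=True)
    print(checker.run("What type of mammal lays the biggest eggs?"))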
1dad0d01e8a9-0
Source code for langchain.chains.retrieval_qa.base """Chain for question-answering against a vector database.""" from __future__ import annotations import inspect import warnings from abc import abstractmethod from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever, Document from langchain.schema.language_model import BaseLanguageModel from langchain.vectorstores.base import VectorStore [docs]class BaseRetrievalQA(Chain): """Base class for question-answering chains.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine the documents.""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_source_documents: bool = False """Return the source documents or not.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True allow_population_by_field_name = True @property def input_keys(self) -> List[str]: """Input keys. :meta private: """ return [self.input_key]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-1
:meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Output keys. :meta private: """ _output_keys = [self.output_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseRetrievalQA: """Initialize from LLM.""" _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks) document_prompt = PromptTemplate( input_variables=["page_content"], template="Context:\n{page_content}" ) combine_documents_chain = StuffDocumentsChain( llm_chain=llm_chain, document_variable_name="context", document_prompt=document_prompt, callbacks=callbacks, ) return cls( combine_documents_chain=combine_documents_chain, callbacks=callbacks, **kwargs, ) [docs] @classmethod def from_chain_type( cls, llm: BaseLanguageModel, chain_type: str = "stuff", chain_type_kwargs: Optional[dict] = None, **kwargs: Any, ) -> BaseRetrievalQA: """Load chain from chain type.""" _chain_type_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_chain(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-2
combine_documents_chain = load_qa_chain( llm, chain_type=chain_type, **_chain_type_kwargs ) return cls(combine_documents_chain=combine_documents_chain, **kwargs) @abstractmethod def _get_docs( self, question: str, *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get documents to do question answering over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Retrieve documents and run the combine-documents chain on the input query. If the chain has 'return_source_documents' set to 'True', the retrieved documents are also returned under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] accepts_run_manager = ( "run_manager" in inspect.signature(self._get_docs).parameters ) if accepts_run_manager: docs = self._get_docs(question, run_manager=_run_manager) else: docs = self._get_docs(question) # type: ignore[call-arg] answer = self.combine_documents_chain.run( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-3
return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer} @abstractmethod async def _aget_docs( self, question: str, *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get documents to do question answering over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Retrieve documents and run the combine-documents chain on the input query. If the chain has 'return_source_documents' set to 'True', the retrieved documents are also returned under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] accepts_run_manager = ( "run_manager" in inspect.signature(self._aget_docs).parameters ) if accepts_run_manager: docs = await self._aget_docs(question, run_manager=_run_manager) else: docs = await self._aget_docs(question) # type: ignore[call-arg] answer = await self.combine_documents_chain.arun( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-4
else: return {self.output_key: answer} [docs]class RetrievalQA(BaseRetrievalQA): """Chain for question-answering against an index. Example: .. code-block:: python from langchain.llms import OpenAI from langchain.chains import RetrievalQA from langchain.vectorstores import FAISS from langchain.vectorstores.base import VectorStoreRetriever retriever = VectorStoreRetriever(vectorstore=FAISS(...)) retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) """ retriever: BaseRetriever = Field(exclude=True) def _get_docs( self, question: str, *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" return self.retriever.get_relevant_documents( question, callbacks=run_manager.get_child() ) async def _aget_docs( self, question: str, *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" return await self.retriever.aget_relevant_documents( question, callbacks=run_manager.get_child() ) @property def _chain_type(self) -> str: """Return the chain type.""" return "retrieval_qa" [docs]class VectorDBQA(BaseRetrievalQA): """Chain for question-answering against a vector database.""" vectorstore: VectorStore = Field(exclude=True, alias="vectorstore") """Vector Database to connect to.""" k: int = 4
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-5
"""Vector Database to connect to.""" k: int = 4 """Number of documents to query for.""" search_type: str = "similarity" """Search type to use over vectorstore. `similarity` or `mmr`.""" search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQA` is deprecated - " "please use `from langchain.chains import RetrievalQA`" ) return values @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "mmr"): raise ValueError(f"search_type of {search_type} not allowed.") return values def _get_docs( self, question: str, *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" if self.search_type == "similarity": docs = self.vectorstore.similarity_search( question, k=self.k, **self.search_kwargs ) elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( question, k=self.k, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def _aget_docs( self, question: str, *,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
1dad0d01e8a9-6
self, question: str, *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" raise NotImplementedError("VectorDBQA does not support async") @property def _chain_type(self) -> str: """Return the chain type.""" return "vector_db_qa"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
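A usage sketch for RetrievalQA, assuming an OpenAI API key; the two indexed sentences are placeholders:

.. code-block:: python

    from langchain.chains import RetrievalQA
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS

    # Tiny in-memory index; real usage would load and split documents first.
    store = FAISS.from_texts(
        ["Harrison worked at Kensho.", "Ankush went to Princeton."],
        OpenAIEmbeddings(),
    )

    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(temperature=0),
        chain_type="stuff",
        retriever=store.as_retriever(),
        return_source_documents=True,
    )
    result = qa({"query": "Where did Harrison work?"})
    print(result["result"])
    print(result["source_documents"])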
38b63a9ef782-0
Source code for langchain.chains.query_constructor.parser import datetime from typing import Any, Optional, Sequence, Union from langchain.utils import check_package_version try: check_package_version("lark", gte_version="1.1.5") from lark import Lark, Transformer, v_args except ImportError: [docs] def v_args(*args: Any, **kwargs: Any) -> Any: # type: ignore return lambda _: None Transformer = object # type: ignore Lark = object # type: ignore from langchain.chains.query_constructor.ir import ( Comparator, Comparison, FilterDirective, Operation, Operator, ) GRAMMAR = """ ?program: func_call ?expr: func_call | value func_call: CNAME "(" [args] ")" ?value: SIGNED_INT -> int | SIGNED_FLOAT -> float | TIMESTAMP -> timestamp | list | string | ("false" | "False" | "FALSE") -> false | ("true" | "True" | "TRUE") -> true args: expr ("," expr)* TIMESTAMP.2: /["'](\d{4}-[01]\d-[0-3]\d)["']/ string: /'[^']*'/ | ESCAPED_STRING list: "[" [args] "]" %import common.CNAME %import common.ESCAPED_STRING %import common.SIGNED_FLOAT %import common.SIGNED_INT %import common.WS %ignore WS """ @v_args(inline=True) class QueryTransformer(Transformer): """Transforms a query string into an intermediate representation.""" def __init__(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html
38b63a9ef782-1
"""Transforms a query string into an intermediate representation.""" def __init__( self, *args: Any, allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, **kwargs: Any, ): super().__init__(*args, **kwargs) self.allowed_comparators = allowed_comparators self.allowed_operators = allowed_operators def program(self, *items: Any) -> tuple: return items def func_call(self, func_name: Any, args: list) -> FilterDirective: func = self._match_func_name(str(func_name)) if isinstance(func, Comparator): return Comparison(comparator=func, attribute=args[0], value=args[1]) elif len(args) == 1 and func in (Operator.AND, Operator.OR): return args[0] else: return Operation(operator=func, arguments=args) def _match_func_name(self, func_name: str) -> Union[Operator, Comparator]: if func_name in set(Comparator): if self.allowed_comparators is not None: if func_name not in self.allowed_comparators: raise ValueError( f"Received disallowed comparator {func_name}. Allowed " f"comparators are {self.allowed_comparators}" ) return Comparator(func_name) elif func_name in set(Operator): if self.allowed_operators is not None: if func_name not in self.allowed_operators: raise ValueError( f"Received disallowed operator {func_name}. Allowed operators" f" are {self.allowed_operators}" ) return Operator(func_name)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html
38b63a9ef782-2
) return Operator(func_name) else: raise ValueError( f"Received unrecognized function {func_name}. Valid functions are " f"{list(Operator) + list(Comparator)}" ) def args(self, *items: Any) -> tuple: return items def false(self) -> bool: return False def true(self) -> bool: return True def list(self, item: Any) -> list: if item is None: return [] return list(item) def int(self, item: Any) -> int: return int(item) def float(self, item: Any) -> float: return float(item) def timestamp(self, item: Any) -> datetime.date: item = item.replace("'", '"') return datetime.datetime.strptime(item, '"%Y-%m-%d"').date() def string(self, item: Any) -> str: # Remove escaped quotes return str(item).strip("\"'") [docs]def get_parser( allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, ) -> Lark: """ Returns a parser for the query language. Args: allowed_comparators: Optional[Sequence[Comparator]] allowed_operators: Optional[Sequence[Operator]] Returns: Lark parser for the query language. """ # QueryTransformer is None when Lark cannot be imported. if QueryTransformer is None: raise ImportError( "Cannot import lark, please install it with 'pip install lark'." ) transformer = QueryTransformer(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html
38b63a9ef782-3
) transformer = QueryTransformer( allowed_comparators=allowed_comparators, allowed_operators=allowed_operators ) return Lark(GRAMMAR, parser="lalr", transformer=transformer, start="program")
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/parser.html
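The grammar and transformer above can be exercised directly, assuming lark is installed; the attribute names are invented:

.. code-block:: python

    from langchain.chains.query_constructor.parser import get_parser

    parser = get_parser()

    # Function-call syntax is transformed into Comparison/Operation IR nodes.
    directive = parser.parse('and(eq("genre", "comedy"), gt("year", 2000))')
    print(directive)
    # -> Operation(operator=<Operator.AND: 'and'>, arguments=[
    #        Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='comedy'),
    #        Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=2000)])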
15c8dcb04295-0
Source code for langchain.chains.query_constructor.base """LLM Chain for turning a user text query into a structured query.""" from __future__ import annotations import json from typing import Any, Callable, List, Optional, Sequence from langchain import FewShotPromptTemplate, LLMChain from langchain.chains.query_constructor.ir import ( Comparator, Operator, StructuredQuery, ) from langchain.chains.query_constructor.parser import get_parser from langchain.chains.query_constructor.prompt import ( DEFAULT_EXAMPLES, DEFAULT_PREFIX, DEFAULT_SCHEMA, DEFAULT_SUFFIX, EXAMPLE_PROMPT, EXAMPLES_WITH_LIMIT, SCHEMA_WITH_LIMIT, ) from langchain.chains.query_constructor.schema import AttributeInfo from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException from langchain.schema.language_model import BaseLanguageModel [docs]class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]): """Output parser that parses a structured query.""" ast_parse: Callable """Callable that parses dict into internal representation of query language.""" [docs] def parse(self, text: str) -> StructuredQuery: try: expected_keys = ["query", "filter"] allowed_keys = ["query", "filter", "limit"] parsed = parse_and_check_json_markdown(text, expected_keys) if len(parsed["query"]) == 0: parsed["query"] = " " if parsed["filter"] == "NO_FILTER" or not parsed["filter"]: parsed["filter"] = None else: parsed["filter"] = self.ast_parse(parsed["filter"]) if not parsed.get("limit"):
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html
15c8dcb04295-1
if not parsed.get("limit"): parsed.pop("limit", None) return StructuredQuery( **{k: v for k, v in parsed.items() if k in allowed_keys} ) except Exception as e: raise OutputParserException( f"Parsing text\n{text}\n raised following error:\n{e}" ) [docs] @classmethod def from_components( cls, allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, ) -> StructuredQueryOutputParser: """ Create a structured query output parser from components. Args: allowed_comparators: allowed comparators allowed_operators: allowed operators Returns: a structured query output parser """ ast_parser = get_parser( allowed_comparators=allowed_comparators, allowed_operators=allowed_operators ) return cls(ast_parse=ast_parser.parse) def _format_attribute_info(info: Sequence[AttributeInfo]) -> str: info_dicts = {} for i in info: i_dict = dict(i) info_dicts[i_dict.pop("name")] = i_dict return json.dumps(info_dicts, indent=4).replace("{", "{{").replace("}", "}}") def _get_prompt( document_contents: str, attribute_info: Sequence[AttributeInfo], examples: Optional[List] = None, allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, enable_limit: bool = False, ) -> BasePromptTemplate: attribute_str = _format_attribute_info(attribute_info)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html
15c8dcb04295-2
) -> BasePromptTemplate: attribute_str = _format_attribute_info(attribute_info) allowed_comparators = allowed_comparators or list(Comparator) allowed_operators = allowed_operators or list(Operator) if enable_limit: schema = SCHEMA_WITH_LIMIT.format( allowed_comparators=" | ".join(allowed_comparators), allowed_operators=" | ".join(allowed_operators), ) examples = examples or EXAMPLES_WITH_LIMIT else: schema = DEFAULT_SCHEMA.format( allowed_comparators=" | ".join(allowed_comparators), allowed_operators=" | ".join(allowed_operators), ) examples = examples or DEFAULT_EXAMPLES prefix = DEFAULT_PREFIX.format(schema=schema) suffix = DEFAULT_SUFFIX.format( i=len(examples) + 1, content=document_contents, attributes=attribute_str ) output_parser = StructuredQueryOutputParser.from_components( allowed_comparators=allowed_comparators, allowed_operators=allowed_operators ) return FewShotPromptTemplate( examples=examples, example_prompt=EXAMPLE_PROMPT, input_variables=["query"], suffix=suffix, prefix=prefix, output_parser=output_parser, ) [docs]def load_query_constructor_chain( llm: BaseLanguageModel, document_contents: str, attribute_info: List[AttributeInfo], examples: Optional[List] = None, allowed_comparators: Optional[Sequence[Comparator]] = None, allowed_operators: Optional[Sequence[Operator]] = None, enable_limit: bool = False, **kwargs: Any, ) -> LLMChain: """Load a query constructor chain.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html
15c8dcb04295-3
) -> LLMChain: """Load a query constructor chain. Args: llm: BaseLanguageModel to use for the chain. document_contents: The contents of the document to be queried. attribute_info: A list of AttributeInfo objects describing the attributes of the document. examples: Optional list of examples to use for the chain. allowed_comparators: An optional list of allowed comparators. allowed_operators: An optional list of allowed operators. enable_limit: Whether to enable the limit operator. Defaults to False. **kwargs: Returns: A LLMChain that can be used to construct queries. """ prompt = _get_prompt( document_contents, attribute_info, examples=examples, allowed_comparators=allowed_comparators, allowed_operators=allowed_operators, enable_limit=enable_limit, ) return LLMChain(llm=llm, prompt=prompt, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/base.html
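A usage sketch for load_query_constructor_chain, assuming an OpenAI API key; the document description and attribute schema are invented for illustration:

.. code-block:: python

    from langchain.chains.query_constructor.base import load_query_constructor_chain
    from langchain.chains.query_constructor.schema import AttributeInfo
    from langchain.llms import OpenAI

    attribute_info = [
        AttributeInfo(name="genre", description="Genre of the movie", type="string"),
        AttributeInfo(name="year", description="Release year", type="integer"),
    ]

    chain = load_query_constructor_chain(
        llm=OpenAI(temperature=0),
        document_contents="Brief summary of a movie",
        attribute_info=attribute_info,
        enable_limit=True,
    )
    # The prompt's output parser turns the LLM's JSON into a StructuredQuery.
    structured_query = chain.predict_and_parse(query="Two comedies from after 2000")
    print(structured_query)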
95cb36993fe0-0
Source code for langchain.chains.query_constructor.ir """Internal representation of a structured query language.""" from __future__ import annotations from abc import ABC, abstractmethod from enum import Enum from typing import Any, List, Optional, Sequence, Union from pydantic import BaseModel [docs]class Visitor(ABC): """Defines interface for IR translation using visitor pattern.""" allowed_comparators: Optional[Sequence[Comparator]] = None allowed_operators: Optional[Sequence[Operator]] = None def _validate_func(self, func: Union[Operator, Comparator]) -> None: if isinstance(func, Operator) and self.allowed_operators is not None: if func not in self.allowed_operators: raise ValueError( f"Received disallowed operator {func}. Allowed " f"operators are {self.allowed_operators}" ) if isinstance(func, Comparator) and self.allowed_comparators is not None: if func not in self.allowed_comparators: raise ValueError( f"Received disallowed comparator {func}. Allowed " f"comparators are {self.allowed_comparators}" ) [docs] @abstractmethod def visit_operation(self, operation: Operation) -> Any: """Translate an Operation.""" [docs] @abstractmethod def visit_comparison(self, comparison: Comparison) -> Any: """Translate a Comparison.""" [docs] @abstractmethod def visit_structured_query(self, structured_query: StructuredQuery) -> Any: """Translate a StructuredQuery.""" def _to_snake_case(name: str) -> str: """Convert a name into snake_case.""" snake_case = "" for i, char in enumerate(name):
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/ir.html
95cb36993fe0-1
snake_case = "" for i, char in enumerate(name): if char.isupper() and i != 0: snake_case += "_" + char.lower() else: snake_case += char.lower() return snake_case [docs]class Expr(BaseModel): """Base class for all expressions.""" [docs] def accept(self, visitor: Visitor) -> Any: """Accept a visitor. Args: visitor: visitor to accept Returns: result of visiting """ return getattr(visitor, f"visit_{_to_snake_case(self.__class__.__name__)}")( self ) [docs]class Operator(str, Enum): """Enumerator of the operations.""" AND = "and" OR = "or" NOT = "not" [docs]class Comparator(str, Enum): """Enumerator of the comparison operators.""" EQ = "eq" GT = "gt" GTE = "gte" LT = "lt" LTE = "lte" CONTAIN = "contain" LIKE = "like" [docs]class FilterDirective(Expr, ABC): """A filtering expression.""" [docs]class Comparison(FilterDirective): """A comparison to a value.""" comparator: Comparator attribute: str value: Any [docs]class Operation(FilterDirective): """A logical operation over other directives.""" operator: Operator arguments: List[FilterDirective] [docs]class StructuredQuery(Expr): """A structured query.""" query: str """Query string.""" filter: Optional[FilterDirective] """Filtering expression.""" limit: Optional[int]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/ir.html
95cb36993fe0-2
"""Filtering expression.""" limit: Optional[int] """Limit on the number of results."""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/ir.html
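The IR classes can also be constructed directly, which is useful when testing a Visitor implementation; a sketch:

.. code-block:: python

    from langchain.chains.query_constructor.ir import (
        Comparator,
        Comparison,
        Operation,
        Operator,
        StructuredQuery,
    )

    filter_ = Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.EQ, attribute="genre", value="comedy"),
            Comparison(comparator=Comparator.GT, attribute="year", value=2000),
        ],
    )
    query = StructuredQuery(query="funny movies", filter=filter_, limit=5)

    # Expr.accept dispatches on the snake-cased class name, so a Comparison is
    # routed to visitor.visit_comparison and an Operation to visitor.visit_operation.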
2a684b02ef16-0
Source code for langchain.chains.query_constructor.schema from pydantic import BaseModel [docs]class AttributeInfo(BaseModel): """Information about a data source attribute.""" name: str description: str type: str class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True frozen = True
https://api.python.langchain.com/en/latest/_modules/langchain/chains/query_constructor/schema.html
55eef8839829-0
Source code for langchain.chains.llm_symbolic_math.base """Chain that interprets a prompt and executes python code to do symbolic math.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from pydantic import Extra from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_symbolic_math.prompt import PROMPT from langchain.prompts.base import BasePromptTemplate [docs]class LLMSymbolicMathChain(Chain): """Chain that interprets a prompt and executes python code to do symbolic math. Example: .. code-block:: python from langchain import LLMSymbolicMathChain, OpenAI llm_symbolic_math = LLMSymbolicMathChain.from_llm(OpenAI()) """ llm_chain: LLMChain input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] def _evaluate_expression(self, expression: str) -> str: try: import sympy
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_symbolic_math/base.html
55eef8839829-1
try: import sympy except ImportError as e: raise ImportError( "Unable to import sympy, please install it with `pip install sympy`." ) from e try: output = str(sympy.sympify(expression, evaluate=True)) except Exception as e: raise ValueError( f'LLMSymbolicMathChain._evaluate("{expression}") raised error: {e}.' " Please try again with a valid numerical expression" ) # Remove any leading and trailing brackets from the output return re.sub(r"^\[|\]$", "", output) def _process_llm_result( self, llm_output: str, run_manager: CallbackManagerForChainRun ) -> Dict[str, str]: run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) run_manager.on_text("\nAnswer: ", verbose=self.verbose) run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} async def _aprocess_llm_result( self,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_symbolic_math/base.html
55eef8839829-2
async def _aprocess_llm_result( self, llm_output: str, run_manager: AsyncCallbackManagerForChainRun, ) -> Dict[str, str]: await run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) await run_manager.on_text("\nAnswer: ", verbose=self.verbose) await run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return self._process_llm_result(llm_output, _run_manager) async def _acall( self, inputs: Dict[str, str],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_symbolic_math/base.html
55eef8839829-3
self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() await _run_manager.on_text(inputs[self.input_key]) llm_output = await self.llm_chain.apredict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return await self._aprocess_llm_result(llm_output, _run_manager) @property def _chain_type(self) -> str: return "llm_symbolic_math_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate = PROMPT, **kwargs: Any, ) -> LLMSymbolicMathChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_symbolic_math/base.html
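A usage sketch, assuming an OpenAI API key and sympy installed; the printed answer is illustrative:

.. code-block:: python

    from langchain.chains.llm_symbolic_math.base import LLMSymbolicMathChain
    from langchain.llms import OpenAI

    chain = LLMSymbolicMathChain.from_llm(OpenAI(temperature=0))
    # The LLM emits a ```text ...``` expression, which is evaluated with sympy.
    print(chain.run("What is the derivative of sin(x)*exp(x) with respect to x?"))
    # e.g. -> "Answer: exp(x)*sin(x) + exp(x)*cos(x)"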
23eb0300de5e-0
Source code for langchain.chains.api.base """Chain that makes API calls and summarizes the responses to answer a question.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.utilities.requests import TextRequestsWrapper [docs]class APIChain(Chain): """Chain that makes API calls and summarizes the responses to answer a question.""" api_request_chain: LLMChain api_answer_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field(exclude=True) api_docs: str question_key: str = "question" #: :meta private: output_key: str = "output" #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] @root_validator(pre=True) def validate_api_request_prompt(cls, values: Dict) -> Dict: """Check that api request prompt expects the right variables.""" input_vars = values["api_request_chain"].prompt.input_variables expected_vars = {"question", "api_docs"} if set(input_vars) != expected_vars:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
23eb0300de5e-1
if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values @root_validator(pre=True) def validate_api_answer_prompt(cls, values: Dict) -> Dict: """Check that api answer prompt expects the right variables.""" input_vars = values["api_answer_chain"].prompt.input_variables expected_vars = {"question", "api_docs", "api_url", "api_response"} if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = self.api_request_chain.predict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) api_url = api_url.strip() api_response = self.requests_wrapper.get(api_url) _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose ) answer = self.api_answer_chain.predict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} async def _acall(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
23eb0300de5e-2
return {self.output_key: answer} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = await self.api_request_chain.apredict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) await _run_manager.on_text( api_url, color="green", end="\n", verbose=self.verbose ) api_url = api_url.strip() api_response = await self.requests_wrapper.aget(api_url) await _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose ) answer = await self.api_answer_chain.apredict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} [docs] @classmethod def from_llm_and_api_docs( cls, llm: BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: BasePromptTemplate = API_URL_PROMPT, api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, **kwargs: Any, ) -> APIChain: """Load chain from just an LLM and the api docs.""" get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
23eb0300de5e-3
requests_wrapper = TextRequestsWrapper(headers=headers) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=requests_wrapper, api_docs=api_docs, **kwargs, ) @property def _chain_type(self) -> str: return "api_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
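A usage sketch for APIChain, assuming an OpenAI API key and network access; langchain ships example docs for the Open-Meteo weather API:

.. code-block:: python

    from langchain.chains import APIChain
    from langchain.chains.api import open_meteo_docs
    from langchain.llms import OpenAI

    chain = APIChain.from_llm_and_api_docs(
        OpenAI(temperature=0),
        open_meteo_docs.OPEN_METEO_DOCS,
        verbose=True,
    )
    # One LLM call constructs the request URL, a GET request fetches it,
    # and a second LLM call summarizes the raw response.
    print(chain.run("What is the current temperature in Munich in celsius?"))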
6ea9b3729df4-0
Source code for langchain.chains.api.openapi.chain """Chain that makes API calls and summarizes the responses to answer a question.""" from __future__ import annotations import json from typing import Any, Dict, List, NamedTuple, Optional, cast from pydantic import BaseModel, Field from requests import Response from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains.api.openapi.requests_chain import APIRequesterChain from langchain.chains.api.openapi.response_chain import APIResponderChain from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.schema.language_model import BaseLanguageModel from langchain.tools.openapi.utils.api_models import APIOperation from langchain.utilities.requests import Requests class _ParamMapping(NamedTuple): """Mapping from parameter name to parameter value.""" query_params: List[str] body_params: List[str] path_params: List[str] [docs]class OpenAPIEndpointChain(Chain, BaseModel): """Chain interacts with an OpenAPI endpoint using natural language.""" api_request_chain: LLMChain api_response_chain: Optional[LLMChain] api_operation: APIOperation requests: Requests = Field(exclude=True, default_factory=Requests) param_mapping: _ParamMapping = Field(alias="param_mapping") return_intermediate_steps: bool = False instructions_key: str = "instructions" #: :meta private: output_key: str = "output" #: :meta private: max_text_length: Optional[int] = Field(ge=0) #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.instructions_key] @property
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
6ea9b3729df4-1
""" return [self.instructions_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, "intermediate_steps"] def _construct_path(self, args: Dict[str, str]) -> str: """Construct the path from the deserialized input.""" path = self.api_operation.base_url + self.api_operation.path for param in self.param_mapping.path_params: path = path.replace(f"{{{param}}}", str(args.pop(param, ""))) return path def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]: """Extract the query params from the deserialized input.""" query_params = {} for param in self.param_mapping.query_params: if param in args: query_params[param] = args.pop(param) return query_params def _extract_body_params(self, args: Dict[str, str]) -> Optional[Dict[str, str]]: """Extract the request body params from the deserialized input.""" body_params = None if self.param_mapping.body_params: body_params = {} for param in self.param_mapping.body_params: if param in args: body_params[param] = args.pop(param) return body_params [docs] def deserialize_json_input(self, serialized_args: str) -> dict: """Use the serialized typescript dictionary. Resolve the path, query params dict, and optional requestBody dict. """ args: dict = json.loads(serialized_args) path = self._construct_path(args)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
6ea9b3729df4-2
path = self._construct_path(args) body_params = self._extract_body_params(args) query_params = self._extract_query_params(args) return { "url": path, "data": body_params, "params": query_params, } def _get_output(self, output: str, intermediate_steps: dict) -> dict: """Return the output from the API call.""" if self.return_intermediate_steps: return { self.output_key: output, "intermediate_steps": intermediate_steps, } else: return {self.output_key: output} def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() intermediate_steps = {} instructions = inputs[self.instructions_key] instructions = instructions[: self.max_text_length] _api_arguments = self.api_request_chain.predict_and_parse( instructions=instructions, callbacks=_run_manager.get_child() ) api_arguments = cast(str, _api_arguments) intermediate_steps["request_args"] = api_arguments _run_manager.on_text( api_arguments, color="green", end="\n", verbose=self.verbose ) if api_arguments.startswith("ERROR"): return self._get_output(api_arguments, intermediate_steps) elif api_arguments.startswith("MESSAGE:"): return self._get_output( api_arguments[len("MESSAGE:") :], intermediate_steps ) try: request_args = self.deserialize_json_input(api_arguments) method = getattr(self.requests, self.api_operation.method.value)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
6ea9b3729df4-3
method = getattr(self.requests, self.api_operation.method.value) api_response: Response = method(**request_args) if api_response.status_code != 200: method_str = str(self.api_operation.method.value) response_text = ( f"{api_response.status_code}: {api_response.reason}" + f"\nFor {method_str.upper()} {request_args['url']}\n" + f"Called with args: {request_args['params']}" ) else: response_text = api_response.text except Exception as e: response_text = f"Error with message {str(e)}" response_text = response_text[: self.max_text_length] intermediate_steps["response_text"] = response_text _run_manager.on_text( response_text, color="blue", end="\n", verbose=self.verbose ) if self.api_response_chain is not None: _answer = self.api_response_chain.predict_and_parse( response=response_text, instructions=instructions, callbacks=_run_manager.get_child(), ) answer = cast(str, _answer) _run_manager.on_text(answer, color="yellow", end="\n", verbose=self.verbose) return self._get_output(answer, intermediate_steps) else: return self._get_output(response_text, intermediate_steps) [docs] @classmethod def from_url_and_method( cls, spec_url: str, path: str, method: str, llm: BaseLanguageModel, requests: Optional[Requests] = None, return_intermediate_steps: bool = False, **kwargs: Any # TODO: Handle async ) -> "OpenAPIEndpointChain":
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
6ea9b3729df4-4
# TODO: Handle async ) -> "OpenAPIEndpointChain": """Create an OpenAPIEndpoint from a spec at the specified url.""" operation = APIOperation.from_openapi_url(spec_url, path, method) return cls.from_api_operation( operation, requests=requests, llm=llm, return_intermediate_steps=return_intermediate_steps, **kwargs, ) [docs] @classmethod def from_api_operation( cls, operation: APIOperation, llm: BaseLanguageModel, requests: Optional[Requests] = None, verbose: bool = False, return_intermediate_steps: bool = False, raw_response: bool = False, callbacks: Callbacks = None, **kwargs: Any # TODO: Handle async ) -> "OpenAPIEndpointChain": """Create an OpenAPIEndpointChain from an operation and a spec.""" param_mapping = _ParamMapping( query_params=operation.query_params, body_params=operation.body_params, path_params=operation.path_params, ) requests_chain = APIRequesterChain.from_llm_and_typescript( llm, typescript_definition=operation.to_typescript(), verbose=verbose, callbacks=callbacks, ) if raw_response: response_chain = None else: response_chain = APIResponderChain.from_llm( llm, verbose=verbose, callbacks=callbacks ) _requests = requests or Requests() return cls( api_request_chain=requests_chain, api_response_chain=response_chain, api_operation=operation, requests=_requests, param_mapping=param_mapping,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
6ea9b3729df4-5
requests=_requests, param_mapping=param_mapping, verbose=verbose, return_intermediate_steps=return_intermediate_steps, callbacks=callbacks, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
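A usage sketch for from_url_and_method; the spec URL, path, and method below are placeholders for a real OpenAPI spec, and an OpenAI API key is assumed:

.. code-block:: python

    from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
    from langchain.llms import OpenAI

    chain = OpenAPIEndpointChain.from_url_and_method(
        spec_url="https://example.com/openapi.json",  # placeholder spec
        path="/search",                               # placeholder path
        method="get",
        llm=OpenAI(temperature=0),
        return_intermediate_steps=True,
    )
    output = chain("Find entries about FLARE")
    print(output["output"])
    print(output["intermediate_steps"])  # request args and raw response text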
ef223576e235-0
Source code for langchain.chains.api.openapi.requests_chain """request parser.""" import json import re from typing import Any from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser from langchain.schema.language_model import BaseLanguageModel [docs]class APIRequesterOutputParser(BaseOutputParser): """Parse the request and error tags.""" def _load_json_block(self, serialized_block: str) -> str: try: return json.dumps(json.loads(serialized_block, strict=False)) except json.JSONDecodeError: return "ERROR serializing request." [docs] def parse(self, llm_output: str) -> str: """Parse the request and error tags.""" json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL) if json_match: return self._load_json_block(json_match.group(1).strip()) message_match = re.search(r"```text(.*?)```", llm_output, re.DOTALL) if message_match: return f"MESSAGE: {message_match.group(1).strip()}" return "ERROR making request" @property def _type(self) -> str: return "api_requester" [docs]class APIRequesterChain(LLMChain): """Get the request parser.""" [docs] @classmethod def from_llm_and_typescript( cls, llm: BaseLanguageModel, typescript_definition: str, verbose: bool = True, **kwargs: Any, ) -> LLMChain: """Get the request parser."""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/requests_chain.html
ef223576e235-1
) -> LLMChain: """Get the request parser.""" output_parser = APIRequesterOutputParser() prompt = PromptTemplate( template=REQUEST_TEMPLATE, output_parser=output_parser, partial_variables={"schema": typescript_definition}, input_variables=["instructions"], ) return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/requests_chain.html
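The requester's output parser can be checked in isolation; no LLM is required:

.. code-block:: python

    from langchain.chains.api.openapi.requests_chain import APIRequesterOutputParser

    parser = APIRequesterOutputParser()

    # A fenced json block is round-tripped into compact JSON.
    print(parser.parse('```json\n{"q": "weather"}\n```'))
    # -> {"q": "weather"}

    # Output with no recognized block falls through to the error sentinel.
    print(parser.parse("no block here"))
    # -> ERROR making request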
b45b7b742662-0
Source code for langchain.chains.api.openapi.response_chain """Response parser.""" import json import re from typing import Any from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser from langchain.schema.language_model import BaseLanguageModel [docs]class APIResponderOutputParser(BaseOutputParser): """Parse the response and error tags.""" def _load_json_block(self, serialized_block: str) -> str: try: response_content = json.loads(serialized_block, strict=False) return response_content.get("response", "ERROR parsing response.") except json.JSONDecodeError: return "ERROR parsing response." except: raise [docs] def parse(self, llm_output: str) -> str: """Parse the response and error tags.""" json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL) if json_match: return self._load_json_block(json_match.group(1).strip()) else: raise ValueError(f"No response found in output: {llm_output}.") @property def _type(self) -> str: return "api_responder" [docs]class APIResponderChain(LLMChain): """Get the response parser.""" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any ) -> LLMChain: """Get the response parser.""" output_parser = APIResponderOutputParser() prompt = PromptTemplate( template=RESPONSE_TEMPLATE, output_parser=output_parser,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/response_chain.html
b45b7b742662-1
template=RESPONSE_TEMPLATE, output_parser=output_parser, input_variables=["response", "instructions"], ) return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/response_chain.html
c0ad0cf107ae-0
Source code for langchain.chains.elasticsearch_database.base """Chain for interacting with an Elasticsearch database.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT from langchain.chains.llm import LLMChain from langchain.output_parsers.json import SimpleJsonOutputParser from langchain.schema import BaseLLMOutputParser, BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel if TYPE_CHECKING: from elasticsearch import Elasticsearch INTERMEDIATE_STEPS_KEY = "intermediate_steps" [docs]class ElasticsearchDatabaseChain(Chain): """Chain for interacting with an Elasticsearch database. Example: .. code-block:: python from langchain import ElasticsearchDatabaseChain, OpenAI from elasticsearch import Elasticsearch database = Elasticsearch("http://localhost:9200") db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database) """ query_chain: LLMChain """Chain for creating the ES query.""" answer_chain: LLMChain """Chain for answering the user question.""" database: Any """The Elasticsearch database to connect to, of type elasticsearch.Elasticsearch.""" top_k: int = 10 """Number of results to return from the query.""" ignore_indices: Optional[List[str]] = None include_indices: Optional[List[str]] = None input_key: str = "question" #: :meta private: output_key: str = "result" #: :meta private: sample_documents_in_index_info: int = 3
https://api.python.langchain.com/en/latest/_modules/langchain/chains/elasticsearch_database/base.html
c0ad0cf107ae-1
sample_documents_in_index_info: int = 3 return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_indices(cls, values: dict) -> dict: if values["include_indices"] and values["ignore_indices"]: raise ValueError( "Cannot specify both 'include_indices' and 'ignore_indices'." ) return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _list_indices(self) -> List[str]: all_indices = [ index["index"] for index in self.database.cat.indices(format="json") ] if self.include_indices: all_indices = [i for i in all_indices if i in self.include_indices] if self.ignore_indices: all_indices = [i for i in all_indices if i not in self.ignore_indices] return all_indices def _get_indices_infos(self, indices: List[str]) -> str: mappings = self.database.indices.get_mapping(index=",".join(indices)) if self.sample_documents_in_index_info > 0: for k, v in mappings.items():
https://api.python.langchain.com/en/latest/_modules/langchain/chains/elasticsearch_database/base.html
c0ad0cf107ae-2
for k, v in mappings.items(): hits = self.database.search( index=k, query={"match_all": {}}, size=self.sample_documents_in_index_info, )["hits"]["hits"] hits = [str(hit["_source"]) for hit in hits] mappings[k]["mappings"] = str(v) + "\n\n/*\n" + "\n".join(hits) + "\n*/" return "\n\n".join( [ "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"]) for index in mappings ] ) def _search(self, indices: List[str], query: str) -> str: result = self.database.search(index=",".join(indices), body=query) return str(result) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() input_text = f"{inputs[self.input_key]}\nESQuery:" _run_manager.on_text(input_text, verbose=self.verbose) indices = self._list_indices() indices_info = self._get_indices_infos(indices) query_inputs: dict = { "input": input_text, "top_k": str(self.top_k), "indices_info": indices_info, "stop": ["\nESResult:"], } intermediate_steps: List = [] try: intermediate_steps.append(query_inputs) # input: es generation es_cmd = self.query_chain.run( callbacks=_run_manager.get_child(), **query_inputs,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/elasticsearch_database/base.html
c0ad0cf107ae-3
callbacks=_run_manager.get_child(), **query_inputs, ) _run_manager.on_text(es_cmd, color="green", verbose=self.verbose) intermediate_steps.append( es_cmd ) # output: elasticsearch dsl generation (no checker) intermediate_steps.append({"es_cmd": es_cmd}) # input: ES search result = self._search(indices=indices, query=es_cmd) intermediate_steps.append(str(result)) # output: ES search _run_manager.on_text("\nESResult: ", verbose=self.verbose) _run_manager.on_text(result, color="yellow", verbose=self.verbose) _run_manager.on_text("\nAnswer:", verbose=self.verbose) answer_inputs: dict = {"data": result, "input": input_text} intermediate_steps.append(answer_inputs) # input: final answer final_result = self.answer_chain.run( callbacks=_run_manager.get_child(), **answer_inputs, ) intermediate_steps.append(final_result) # output: final answer _run_manager.on_text(final_result, color="green", verbose=self.verbose) chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result except Exception as exc: # Append intermediate steps to exception, to aid in logging and later # improvement of few shot prompt seeds exc.intermediate_steps = intermediate_steps # type: ignore raise exc @property def _chain_type(self) -> str: return "elasticsearch_database_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/elasticsearch_database/base.html
c0ad0cf107ae-4
def from_llm( cls, llm: BaseLanguageModel, database: Elasticsearch, *, query_prompt: Optional[BasePromptTemplate] = None, answer_prompt: Optional[BasePromptTemplate] = None, query_output_parser: Optional[BaseLLMOutputParser] = None, **kwargs: Any, ) -> ElasticsearchDatabaseChain: """Convenience method to construct ElasticsearchDatabaseChain from an LLM. Args: llm: The language model to use. database: The Elasticsearch db. query_prompt: The prompt to use for query construction. answer_prompt: The prompt to use for answering user question given data. query_output_parser: The output parser to use for parsing model-generated ES query. Defaults to SimpleJsonOutputParser. **kwargs: Additional arguments to pass to the constructor. """ query_prompt = query_prompt or DSL_PROMPT query_output_parser = query_output_parser or SimpleJsonOutputParser() query_chain = LLMChain( llm=llm, prompt=query_prompt, output_parser=query_output_parser ) answer_prompt = answer_prompt or ANSWER_PROMPT answer_chain = LLMChain(llm=llm, prompt=answer_prompt) return cls( query_chain=query_chain, answer_chain=answer_chain, database=database, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/elasticsearch_database/base.html
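A usage sketch for from_llm, assuming a reachable Elasticsearch node with at least one populated index and an OpenAI API key; the question is invented:

.. code-block:: python

    from elasticsearch import Elasticsearch
    from langchain.chains.elasticsearch_database.base import ElasticsearchDatabaseChain
    from langchain.llms import OpenAI

    db = Elasticsearch("http://localhost:9200")

    chain = ElasticsearchDatabaseChain.from_llm(
        llm=OpenAI(temperature=0),
        database=db,
        top_k=5,
        return_intermediate_steps=True,
    )
    result = chain("How many customers live in Berlin?")
    print(result["result"])
    print(result["intermediate_steps"])  # generated DSL, raw hits, answer inputs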
597b8de8cf15-0
Source code for langchain.chains.conversation.base """Chain that carries on a conversation and calls an LLM.""" from typing import Dict, List from pydantic import Extra, Field, root_validator from langchain.chains.conversation.prompt import PROMPT from langchain.chains.llm import LLMChain from langchain.memory.buffer import ConversationBufferMemory from langchain.schema import BaseMemory, BasePromptTemplate [docs]class ConversationChain(LLMChain): """Chain to have a conversation and load context from memory. Example: .. code-block:: python from langchain import ConversationChain, OpenAI conversation = ConversationChain(llm=OpenAI()) """ memory: BaseMemory = Field(default_factory=ConversationBufferMemory) """Default memory store.""" prompt: BasePromptTemplate = PROMPT """Default conversation prompt to use.""" input_key: str = "input" #: :meta private: output_key: str = "response" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Use this since some prompt vars come from history.""" return [self.input_key] @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables input_key = values["input_key"] if input_key in memory_keys: raise ValueError( f"The input key {input_key} was also found in the memory keys " f"({memory_keys}) - please provide keys that don't overlap." )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
597b8de8cf15-1
f"({memory_keys}) - please provide keys that don't overlap." ) prompt_variables = values["prompt"].input_variables expected_keys = memory_keys + [input_key] if set(expected_keys) != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but got {memory_keys} as inputs from " f"memory, and {input_key} as the normal input key." ) return values
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
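A short usage sketch for the chain above; the dialogue turns are invented. It also illustrates the constraint that validate_prompt_input_variables enforces: the memory's "history" variable plus the "input" key must together match the prompt's input variables.

# Hedged usage sketch for ConversationChain; the dialogue is invented.
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationBufferMemory(),  # exposes the "history" prompt variable
)
conversation.run("Hi, my name is Ada.")
print(conversation.run("What is my name?"))  # memory supplies the earlier turn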
2738c6adba9d-0
Source code for langchain.chat_models.base
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Optional,
    Sequence,
    cast,
)

from pydantic import Field, root_validator

import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForLLMRun,
    CallbackManager,
    CallbackManagerForLLMRun,
    Callbacks,
)
from langchain.load.dump import dumpd, dumps
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import (
    ChatGeneration,
    ChatResult,
    LLMResult,
    PromptValue,
    RunInfo,
)
from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
    BaseMessageChunk,
    HumanMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.schema.runnable import RunnableConfig


def _get_verbosity() -> bool:
    return langchain.verbose


[docs]class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
    cache: Optional[bool] = None
    """Whether to cache the response."""
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether to print out response text."""
    callbacks: Callbacks = Field(default=None, exclude=True)
    """Callbacks to add to the run trace."""
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-1
    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
    """Callback manager to add to the run trace."""
    tags: Optional[List[str]] = Field(default=None, exclude=True)
    """Tags to add to the run trace."""
    metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
    """Metadata to add to the run trace."""

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        """Raise deprecation warning if callback_manager is used."""
        if values.get("callback_manager") is not None:
            warnings.warn(
                "callback_manager is deprecated. Please use callbacks instead.",
                DeprecationWarning,
            )
            values["callbacks"] = values.pop("callback_manager", None)
        return values

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    # --- Runnable methods ---

    def _convert_input(self, input: LanguageModelInput) -> PromptValue:
        if isinstance(input, PromptValue):
            return input
        elif isinstance(input, str):
            return StringPromptValue(text=input)
        elif isinstance(input, list):
            return ChatPromptValue(messages=input)
        else:
            raise ValueError(
                f"Invalid input type {type(input)}. "
                "Must be a PromptValue, str, or list of BaseMessages."
            )

[docs]    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> BaseMessageChunk:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-2
        return cast(
            BaseMessageChunk,
            cast(
                ChatGeneration,
                self.generate_prompt(
                    [self._convert_input(input)], stop=stop, **(config or {}), **kwargs
                ).generations[0][0],
            ).message,
        )

[docs]    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> BaseMessageChunk:
        if type(self)._agenerate == BaseChatModel._agenerate:
            # model doesn't implement async generation, so use default implementation
            return await asyncio.get_running_loop().run_in_executor(
                None, partial(self.invoke, input, config, stop=stop, **kwargs)
            )

        llm_result = await self.agenerate_prompt(
            [self._convert_input(input)], stop=stop, **(config or {}), **kwargs
        )
        return cast(
            BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message
        )

[docs]    def stream(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Iterator[BaseMessageChunk]:
        if type(self)._stream == BaseChatModel._stream:
            # model doesn't implement streaming, so use default implementation
            yield self.invoke(input, config=config, stop=stop, **kwargs)
        else:
            config = config or {}
            messages = self._convert_input(input).to_messages()
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-3
            params = self._get_invocation_params(stop=stop, **kwargs)
            options = {"stop": stop, **kwargs}
            callback_manager = CallbackManager.configure(
                config.get("callbacks"),
                self.callbacks,
                self.verbose,
                config.get("tags"),
                self.tags,
                config.get("metadata"),
                self.metadata,
            )
            (run_manager,) = callback_manager.on_chat_model_start(
                dumpd(self), [messages], invocation_params=params, options=options
            )
            try:
                message: Optional[BaseMessageChunk] = None
                for chunk in self._stream(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                ):
                    yield chunk.message
                    if message is None:
                        message = chunk.message
                    else:
                        message += chunk.message
                assert message is not None
            except (KeyboardInterrupt, Exception) as e:
                run_manager.on_llm_error(e)
                raise e
            else:
                run_manager.on_llm_end(
                    LLMResult(generations=[[ChatGeneration(message=message)]]),
                )

[docs]    async def astream(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> AsyncIterator[BaseMessageChunk]:
        if type(self)._astream == BaseChatModel._astream:
            # model doesn't implement streaming, so use default implementation
            yield self.invoke(input, config=config, stop=stop, **kwargs)
        else:
            config = config or {}
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-4
            messages = self._convert_input(input).to_messages()
            params = self._get_invocation_params(stop=stop, **kwargs)
            options = {"stop": stop, **kwargs}
            callback_manager = AsyncCallbackManager.configure(
                config.get("callbacks"),
                self.callbacks,
                self.verbose,
                config.get("tags"),
                self.tags,
                config.get("metadata"),
                self.metadata,
            )
            (run_manager,) = await callback_manager.on_chat_model_start(
                dumpd(self), [messages], invocation_params=params, options=options
            )
            try:
                message: Optional[BaseMessageChunk] = None
                async for chunk in self._astream(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                ):
                    yield chunk.message
                    if message is None:
                        message = chunk.message
                    else:
                        message += chunk.message
                assert message is not None
            except (KeyboardInterrupt, Exception) as e:
                await run_manager.on_llm_error(e)
                raise e
            else:
                await run_manager.on_llm_end(
                    LLMResult(generations=[[ChatGeneration(message=message)]]),
                )

    # --- Custom methods ---

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        return {}

    def _get_invocation_params(
        self,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> dict:
        params = self.dict()
        params["stop"] = stop
        return {**params, **kwargs}
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
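The Runnable methods above (invoke, ainvoke, stream, astream) all bottom out in the subclass hooks _generate and, optionally, _stream/_agenerate. Below is a minimal sketch of a concrete subclass; the ParrotChat name and its echo behavior are hypothetical, purely for illustration, and the sketch assumes this version of BaseChatModel provides the default _agenerate fallback that ainvoke checks for.

# Hedged sketch of a minimal BaseChatModel subclass; "ParrotChat" and its
# echo behavior are hypothetical, for illustration only.
from typing import Any, List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult


class ParrotChat(BaseChatModel):
    """Echoes the last message back as the AI reply."""

    @property
    def _llm_type(self) -> str:
        return "parrot-chat"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Wrap a single echoed reply in the ChatResult structure that
        # generate() and invoke() expect.
        reply = AIMessage(content=messages[-1].content)
        return ChatResult(generations=[ChatGeneration(message=reply)])


chat = ParrotChat()
print(chat.invoke("hello").content)  # -> "hello"
# stream() falls back to a single full message because _stream is not
# implemented by this subclass:
for chunk in chat.stream("hi"):
    print(chunk.content)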
2738c6adba9d-5
params["stop"] = stop return {**params, **kwargs} def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str: if self.lc_serializable: params = {**kwargs, **{"stop": stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + "---" + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()])) [docs] def generate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options ) results = [] for i, m in enumerate(messages): try: results.append( self._generate_with_cache( m, stop=stop,
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-6
                    self._generate_with_cache(
                        m,
                        stop=stop,
                        run_manager=run_managers[i] if run_managers else None,
                        **kwargs,
                    )
                )
            except (KeyboardInterrupt, Exception) as e:
                if run_managers:
                    run_managers[i].on_llm_error(e)
                raise e
        flattened_outputs = [
            LLMResult(generations=[res.generations], llm_output=res.llm_output)
            for res in results
        ]
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        output = LLMResult(generations=generations, llm_output=llm_output)
        if run_managers:
            run_infos = []
            for manager, flattened_output in zip(run_managers, flattened_outputs):
                manager.on_llm_end(flattened_output)
                run_infos.append(RunInfo(run_id=manager.run_id))
            output.run = run_infos
        return output

[docs]    async def agenerate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        *,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Top Level call"""
        params = self._get_invocation_params(stop=stop, **kwargs)
        options = {"stop": stop}
        callback_manager = AsyncCallbackManager.configure(
            callbacks,
            self.callbacks,
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-7
            self.verbose,
            tags,
            self.tags,
            metadata,
            self.metadata,
        )
        run_managers = await callback_manager.on_chat_model_start(
            dumpd(self), messages, invocation_params=params, options=options
        )
        results = await asyncio.gather(
            *[
                self._agenerate_with_cache(
                    m,
                    stop=stop,
                    run_manager=run_managers[i] if run_managers else None,
                    **kwargs,
                )
                for i, m in enumerate(messages)
            ],
            return_exceptions=True,
        )
        exceptions = []
        for i, res in enumerate(results):
            if isinstance(res, Exception):
                if run_managers:
                    await run_managers[i].on_llm_error(res)
                exceptions.append(res)
        if exceptions:
            if run_managers:
                await asyncio.gather(
                    *[
                        run_manager.on_llm_end(
                            LLMResult(
                                generations=[res.generations], llm_output=res.llm_output
                            )
                        )
                        for run_manager, res in zip(run_managers, results)
                        if not isinstance(res, Exception)
                    ]
                )
            raise exceptions[0]
        flattened_outputs = [
            LLMResult(generations=[res.generations], llm_output=res.llm_output)
            for res in results
        ]
        llm_output = self._combine_llm_outputs([res.llm_output for res in results])
        generations = [res.generations for res in results]
        output = LLMResult(generations=generations, llm_output=llm_output)
        await asyncio.gather(
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-8
            *[
                run_manager.on_llm_end(flattened_output)
                for run_manager, flattened_output in zip(
                    run_managers, flattened_outputs
                )
            ]
        )
        if run_managers:
            output.run = [
                RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
            ]
        return output

[docs]    def generate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        prompt_messages = [p.to_messages() for p in prompts]
        return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

[docs]    async def agenerate_prompt(
        self,
        prompts: List[PromptValue],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        prompt_messages = [p.to_messages() for p in prompts]
        return await self.agenerate(
            prompt_messages, stop=stop, callbacks=callbacks, **kwargs
        )

    def _generate_with_cache(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        new_arg_supported = inspect.signature(self._generate).parameters.get(
            "run_manager"
        )
        disregard_cache = self.cache is not None and not self.cache
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
2738c6adba9d-9
        if langchain.llm_cache is None or disregard_cache:
            # This happens when langchain.cache is None, but self.cache is True
            if self.cache is not None and self.cache:
                raise ValueError(
                    "Asked to cache, but no cache found at `langchain.cache`."
                )
            if new_arg_supported:
                return self._generate(
                    messages, stop=stop, run_manager=run_manager, **kwargs
                )
            else:
                return self._generate(messages, stop=stop, **kwargs)
        else:
            llm_string = self._get_llm_string(stop=stop, **kwargs)
            prompt = dumps(messages)
            cache_val = langchain.llm_cache.lookup(prompt, llm_string)
            if isinstance(cache_val, list):
                return ChatResult(generations=cache_val)
            else:
                if new_arg_supported:
                    result = self._generate(
                        messages, stop=stop, run_manager=run_manager, **kwargs
                    )
                else:
                    result = self._generate(messages, stop=stop, **kwargs)
                langchain.llm_cache.update(prompt, llm_string, result.generations)
                return result

    async def _agenerate_with_cache(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        new_arg_supported = inspect.signature(self._agenerate).parameters.get(
            "run_manager"
        )
        disregard_cache = self.cache is not None and not self.cache
        if langchain.llm_cache is None or disregard_cache:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html
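A sketch of the caching path in _generate_with_cache, assuming the global langchain.llm_cache is set and reusing the hypothetical ParrotChat model from the earlier sketch; any concrete chat model would behave the same way.

# Hedged sketch of response caching; reuses the hypothetical ParrotChat
# from the earlier sketch.
import langchain
from langchain.cache import InMemoryCache

langchain.llm_cache = InMemoryCache()

chat = ParrotChat(cache=True)
chat.invoke("hello")  # miss: _generate runs and the generations are stored
chat.invoke("hello")  # hit: served from the cache, _generate is not called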