Source code for langchain.chains.api.openapi.chain

        # TODO: Handle async
    ) -> "OpenAPIEndpointChain":
        """Create an OpenAPIEndpoint from a spec at the specified url."""
        operation = APIOperation.from_openapi_url(spec_url, path, method)
        return cls.from_api_operation(
            operation,
            requests=requests,
            llm=llm,
            return_intermediate_steps=return_intermediate_steps,
            **kwargs,
        )

    @classmethod
    def from_api_operation(
        cls,
        operation: APIOperation,
        llm: BaseLanguageModel,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        return_intermediate_steps: bool = False,
        raw_response: bool = False,
        callbacks: Callbacks = None,
        **kwargs: Any,  # TODO: Handle async
    ) -> "OpenAPIEndpointChain":
        """Create an OpenAPIEndpointChain from an operation and a spec."""
        param_mapping = _ParamMapping(
            query_params=operation.query_params,
            body_params=operation.body_params,
            path_params=operation.path_params,
        )
        requests_chain = APIRequesterChain.from_llm_and_typescript(
            llm,
            typescript_definition=operation.to_typescript(),
            verbose=verbose,
            callbacks=callbacks,
        )
        if raw_response:
            response_chain = None
        else:
            response_chain = APIResponderChain.from_llm(
                llm, verbose=verbose, callbacks=callbacks
            )
        _requests = requests or Requests()
        return cls(
            api_request_chain=requests_chain,
            api_response_chain=response_chain,
            api_operation=operation,
            requests=_requests,
            param_mapping=param_mapping,
            verbose=verbose,
            return_intermediate_steps=return_intermediate_steps,
            callbacks=callbacks,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
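The chunk above shows only the tail of the URL-based constructor, so a usage sketch built on the fully visible from_api_operation is safer. This is a hedged sketch, not part of the original page: the spec URL, path, and method are placeholders, and the APIOperation import path is an assumption based on the same-era langchain tree.

from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain.llms import OpenAI
from langchain.tools.openapi.utils.api_models import APIOperation  # assumed import path

# Placeholder spec and endpoint; any OpenAPI spec with a GET operation would do.
operation = APIOperation.from_openapi_url(
    "https://example.com/openapi.yaml", "/search", "get"
)
chain = OpenAPIEndpointChain.from_api_operation(
    operation,
    llm=OpenAI(temperature=0),
    return_intermediate_steps=True,  # keep the generated request/response for debugging
)
output = chain.run("Find hats under $20")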
Source code for langchain.chains.api.openapi.requests_chain

"""request parser."""

import json
import re
from typing import Any

from langchain.base_language import BaseLanguageModel
from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser


class APIRequesterOutputParser(BaseOutputParser):
    """Parse the request and error tags."""

    def _load_json_block(self, serialized_block: str) -> str:
        try:
            return json.dumps(json.loads(serialized_block, strict=False))
        except json.JSONDecodeError:
            return "ERROR serializing request."

    def parse(self, llm_output: str) -> str:
        """Parse the request and error tags."""
        json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
        if json_match:
            return self._load_json_block(json_match.group(1).strip())
        message_match = re.search(r"```text(.*?)```", llm_output, re.DOTALL)
        if message_match:
            return f"MESSAGE: {message_match.group(1).strip()}"
        return "ERROR making request"

    @property
    def _type(self) -> str:
        return "api_requester"


class APIRequesterChain(LLMChain):
    """Get the request parser."""

    @classmethod
    def from_llm_and_typescript(
        cls,
        llm: BaseLanguageModel,
        typescript_definition: str,
        verbose: bool = True,
        **kwargs: Any,
    ) -> LLMChain:
        """Get the request parser."""
        output_parser = APIRequesterOutputParser()
        prompt = PromptTemplate(
            template=REQUEST_TEMPLATE,
            output_parser=output_parser,
            partial_variables={"schema": typescript_definition},
            input_variables=["instructions"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/requests_chain.html
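A minimal sketch (not part of the original page) of how this parser behaves on the three output shapes it recognizes; the sample strings are invented for illustration:

from langchain.chains.api.openapi.requests_chain import APIRequesterOutputParser

parser = APIRequesterOutputParser()

# A fenced ```json block is re-serialized to compact JSON.
print(parser.parse('```json\n{"q": "hats", "size": 10}\n```'))
# -> {"q": "hats", "size": 10}

# A fenced ```text block is surfaced to the caller as a MESSAGE.
print(parser.parse('```text\nMissing required parameter: q\n```'))
# -> MESSAGE: Missing required parameter: q

# Anything else falls through to an error marker rather than raising.
print(parser.parse("no code block here"))
# -> ERROR making request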
Source code for langchain.chains.api.openapi.response_chain

"""Response parser."""

import json
import re
from typing import Any

from langchain.base_language import BaseLanguageModel
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser


class APIResponderOutputParser(BaseOutputParser):
    """Parse the response and error tags."""

    def _load_json_block(self, serialized_block: str) -> str:
        try:
            response_content = json.loads(serialized_block, strict=False)
            return response_content.get("response", "ERROR parsing response.")
        except json.JSONDecodeError:
            return "ERROR parsing response."
        except:
            raise

    def parse(self, llm_output: str) -> str:
        """Parse the response and error tags."""
        json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
        if json_match:
            return self._load_json_block(json_match.group(1).strip())
        else:
            raise ValueError(f"No response found in output: {llm_output}.")

    @property
    def _type(self) -> str:
        return "api_responder"


class APIResponderChain(LLMChain):
    """Get the response parser."""

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
    ) -> LLMChain:
        """Get the response parser."""
        output_parser = APIResponderOutputParser()
        prompt = PromptTemplate(
            template=RESPONSE_TEMPLATE,
            output_parser=output_parser,
            input_variables=["response", "instructions"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/response_chain.html
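A companion sketch (invented sample strings) for the responder side. Unlike the requester parser, a missing ```json block here raises ValueError instead of returning an error marker:

from langchain.chains.api.openapi.response_chain import APIResponderOutputParser

parser = APIResponderOutputParser()

# A fenced ```json block with a "response" key yields the user-facing answer.
print(parser.parse('```json\n{"response": "There are 10 results."}\n```'))
# -> There are 10 results.

# Valid JSON without a "response" key falls back to the error marker.
print(parser.parse('```json\n{"answer": 42}\n```'))
# -> ERROR parsing response.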
Source code for langchain.chains.conversational_retrieval.base

"""Chain for chatting with a vector database."""
from __future__ import annotations

import inspect
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMessage, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore

# Depending on the memory type and configuration, the chat history format may differ.
# This needs to be consolidated.
CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage]

_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}


def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
    buffer = ""
    for dialogue_turn in chat_history:
        if isinstance(dialogue_turn, BaseMessage):
            role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ")
            buffer += f"\n{role_prefix}{dialogue_turn.content}"
        elif isinstance(dialogue_turn, tuple):
            human = "Human: " + dialogue_turn[0]
            ai = "Assistant: " + dialogue_turn[1]
            buffer += "\n" + "\n".join([human, ai])
        else:
            raise ValueError(
                f"Unsupported chat history format: {type(dialogue_turn)}."
                f" Full chat history: {chat_history} "
            )
    return buffer


class BaseConversationalRetrievalChain(Chain):
    """Chain for chatting with an index."""

    combine_docs_chain: BaseCombineDocumentsChain
    question_generator: LLMChain
    output_key: str = "answer"
    return_source_documents: bool = False
    return_generated_question: bool = False
    get_chat_history: Optional[Callable[[CHAT_TURN_TYPE], str]] = None
    """Return the source documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys."""
        return ["question", "chat_history"]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        if self.return_generated_question:
            _output_keys = _output_keys + ["generated_question"]
        return _output_keys

    @abstractmethod
    def _get_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        if chat_history_str:
            callbacks = _run_manager.get_child()
            new_question = self.question_generator.run(
                question=question, chat_history=chat_history_str, callbacks=callbacks
            )
        else:
            new_question = question
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._get_docs).parameters
        )
        if accepts_run_manager:
            docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = self._get_docs(new_question, inputs)  # type: ignore[call-arg]
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = self.combine_docs_chain.run(
            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
        )
        output: Dict[str, Any] = {self.output_key: answer}
        if self.return_source_documents:
            output["source_documents"] = docs
        if self.return_generated_question:
            output["generated_question"] = new_question
        return output

    @abstractmethod
    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        if chat_history_str:
            callbacks = _run_manager.get_child()
            new_question = await self.question_generator.arun(
                question=question, chat_history=chat_history_str, callbacks=callbacks
            )
        else:
            new_question = question
        accepts_run_manager = (
            "run_manager" in inspect.signature(self._aget_docs).parameters
        )
        if accepts_run_manager:
            docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = await self._aget_docs(new_question, inputs)  # type: ignore[call-arg]
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer = await self.combine_docs_chain.arun(
            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
        )
        output: Dict[str, Any] = {self.output_key: answer}
        if self.return_source_documents:
            output["source_documents"] = docs
        if self.return_generated_question:
            output["generated_question"] = new_question
        return output

    def save(self, file_path: Union[Path, str]) -> None:
        if self.get_chat_history:
            raise ValueError("Chain not savable when `get_chat_history` is not None.")
        super().save(file_path)


class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
    """Chain for chatting with an index."""

    retriever: BaseRetriever
    """Index to connect to."""
    max_tokens_limit: Optional[int] = None
    """If set, restricts the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain"""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)

        if self.max_tokens_limit and isinstance(
            self.combine_docs_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_docs_chain.llm_chain.llm.get_num_tokens(doc.page_content)
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]

        return docs[:num_docs]

    def _get_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        docs = self.retriever.get_relevant_documents(
            question, callbacks=run_manager.get_child()
        )
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        docs = await self.retriever.aget_relevant_documents(
            question, callbacks=run_manager.get_child()
        )
        return self._reduce_tokens_below_limit(docs)

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        retriever: BaseRetriever,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        chain_type: str = "stuff",
        verbose: bool = False,
        condense_question_llm: Optional[BaseLanguageModel] = None,
        combine_docs_chain_kwargs: Optional[Dict] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            verbose=verbose,
            callbacks=callbacks,
            **combine_docs_chain_kwargs,
        )

        _llm = condense_question_llm or llm
        condense_question_chain = LLMChain(
            llm=_llm,
            prompt=condense_question_prompt,
            verbose=verbose,
            callbacks=callbacks,
        )
        return cls(
            retriever=retriever,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            callbacks=callbacks,
            **kwargs,
        )


class ChatVectorDBChain(BaseConversationalRetrievalChain):
    """Chain for chatting with a vector database."""

    vectorstore: VectorStore = Field(alias="vectorstore")
    top_k_docs_for_context: int = 4
    search_kwargs: dict = Field(default_factory=dict)

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`ChatVectorDBChain` is deprecated - "
            "please use `from langchain.chains import ConversationalRetrievalChain`"
        )
        return values

    def _get_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        full_kwargs = {**self.search_kwargs, **vectordbkwargs}
        return self.vectorstore.similarity_search(
            question, k=self.top_k_docs_for_context, **full_kwargs
        )

    async def _aget_docs(
        self,
        question: str,
        inputs: Dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> List[Document]:
        """Get docs."""
        raise NotImplementedError("ChatVectorDBChain does not support async")

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        chain_type: str = "stuff",
        combine_docs_chain_kwargs: Optional[Dict] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            callbacks=callbacks,
            **combine_docs_chain_kwargs,
        )
        condense_question_chain = LLMChain(
            llm=llm, prompt=condense_question_prompt, callbacks=callbacks
        )
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            callbacks=callbacks,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
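A hedged usage sketch of the two-step flow above (condense the follow-up question, then retrieve and answer). `vectorstore` is a placeholder for any already-populated VectorStore; the ChatOpenAI import follows the same-era API:

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI

# `vectorstore` is assumed to be an existing VectorStore instance.
chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),
    return_source_documents=True,
    max_tokens_limit=3000,  # only enforced when the combine chain is a StuffDocumentsChain
)

# First turn: empty history, so the question passes through unchanged.
result = chain({"question": "What is task decomposition?", "chat_history": []})

# Second turn: the (human, ai) tuple makes the question_generator condense
# the follow-up into a standalone question before retrieval.
history = [("What is task decomposition?", result["answer"])]
result = chain({"question": "How is it usually done?", "chat_history": history})
print(result["answer"], result["source_documents"])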
Source code for langchain.chains.pal.base

"""Implements Program-Aided Language Models.

As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.utilities import PythonREPL


class PALChain(Chain):
    """Implements Program-Aided Language Models."""

    llm_chain: LLMChain
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated]"""
    prompt: BasePromptTemplate = MATH_PROMPT
    """[Deprecated]"""
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
    python_globals: Optional[Dict[str, Any]] = None
    python_locals: Optional[Dict[str, Any]] = None
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
            warnings.warn(
                "Directly instantiating a PALChain with an llm is deprecated. "
                "Please instantiate with the llm_chain argument or using one of "
                "the class method constructors from_math_prompt, "
                "from_colored_object_prompt."
            )
            if "llm_chain" not in values and values["llm"] is not None:
                values["llm_chain"] = LLMChain(llm=values["llm"], prompt=MATH_PROMPT)
        return values

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        code = self.llm_chain.predict(
            stop=[self.stop], callbacks=_run_manager.get_child(), **inputs
        )
        _run_manager.on_text(code, color="green", end="\n", verbose=self.verbose)
        repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals)
        res = repl.run(code + f"\n{self.get_answer_expr}")
        output = {self.output_key: res.strip()}
        if self.return_intermediate_steps:
            output["intermediate_steps"] = code
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n",
            get_answer_expr="print(solution())",
            **kwargs,
        )

    @classmethod
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n\n",
            get_answer_expr="print(answer)",
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "pal_chain"

https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
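A hedged sketch of the PAL loop: the LLM writes a solution() program, PythonREPL executes it plus get_answer_expr, and the printed value becomes the result. The word problem is invented, and the top-level PALChain re-export is assumed from this release:

from langchain.chains import PALChain
from langchain.llms import OpenAI

pal = PALChain.from_math_prompt(OpenAI(temperature=0), return_intermediate_steps=True)
out = pal({"question": "Olivia has 23 apples and gives 9 away. How many are left?"})
print(out["intermediate_steps"])  # the generated solution() program
print(out["result"])              # whatever print(solution()) wrote to stdout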
Source code for langchain.chains.router.embedding_router

from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Tuple, Type

from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.router.base import RouterChain
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore


class EmbeddingRouterChain(RouterChain):
    """Class that uses embeddings to route between options."""

    vectorstore: VectorStore
    routing_keys: List[str] = ["query"]

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the LLM chain prompt expects.

        :meta private:
        """
        return self.routing_keys

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _input = ", ".join([inputs[k] for k in self.routing_keys])
        results = self.vectorstore.similarity_search(_input, k=1)
        return {"next_inputs": inputs, "destination": results[0].metadata["name"]}

    @classmethod
    def from_names_and_descriptions(
        cls,
        names_and_descriptions: Sequence[Tuple[str, Sequence[str]]],
        vectorstore_cls: Type[VectorStore],
        embeddings: Embeddings,
        **kwargs: Any,
    ) -> EmbeddingRouterChain:
        """Convenience constructor."""
        documents = []
        for name, descriptions in names_and_descriptions:
            for description in descriptions:
                documents.append(
                    Document(page_content=description, metadata={"name": name})
                )
        vectorstore = vectorstore_cls.from_documents(documents, embeddings)
        return cls(vectorstore=vectorstore, **kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/embedding_router.html
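A hedged sketch of the convenience constructor; the names, descriptions, and the FAISS/OpenAIEmbeddings choices are illustrative stand-ins for any VectorStore class and Embeddings implementation:

from langchain.chains.router.embedding_router import EmbeddingRouterChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

names_and_descriptions = [
    ("physics", ["questions about forces, energy, and motion"]),
    ("history", ["questions about past events and dates"]),
]
router = EmbeddingRouterChain.from_names_and_descriptions(
    names_and_descriptions, FAISS, OpenAIEmbeddings()
)

# The query is embedded and matched against the description documents;
# the nearest neighbour's "name" metadata becomes the destination.
print(router.route({"query": "Why do apples fall?"}).destination)  # likely "physics"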
Source code for langchain.chains.router.multi_prompt

"""Use a single chain to route an input to one of multiple llm chains."""
from __future__ import annotations

from typing import Any, Dict, List, Mapping, Optional

from langchain.base_language import BaseLanguageModel
from langchain.chains import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain, RouterChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
from langchain.prompts import PromptTemplate


class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts."""

    router_chain: RouterChain
    """Chain for deciding a destination chain and the input to it."""
    destination_chains: Mapping[str, LLMChain]
    """Map of name to candidate chains that inputs can be routed to."""
    default_chain: LLMChain
    """Default chain to use when router doesn't map input to one of the destinations."""

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_infos: List[Dict[str, str]],
        default_chain: Optional[LLMChain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts."""
        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for p_info in prompt_infos:
            name = p_info["name"]
            prompt_template = p_info["prompt_template"]
            prompt = PromptTemplate(template=prompt_template, input_variables=["input"])
            chain = LLMChain(llm=llm, prompt=prompt)
            destination_chains[name] = chain
        _default_chain = default_chain or ConversationChain(llm=llm, output_key="text")
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
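A usage sketch of from_prompts; each prompt_info needs the three string keys read by the constructor above (name, description, prompt_template), and the prompts here are invented:

from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.llms import OpenAI

prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering physics questions",
        "prompt_template": "You are a physicist.\n\nQuestion: {input}\nAnswer:",
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": "You are a mathematician.\n\nQuestion: {input}\nAnswer:",
    },
]
chain = MultiPromptChain.from_prompts(OpenAI(), prompt_infos, verbose=True)

# The router picks "physics", "math", or falls back to the ConversationChain default.
print(chain.run("What is the speed of light in a vacuum?"))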
Source code for langchain.chains.router.multi_retrieval_qa

"""Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations

from typing import Any, Dict, List, Mapping, Optional

from langchain.base_language import BaseLanguageModel
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
    MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever


class MultiRetrievalQAChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst retrieval
    qa chains."""

    router_chain: LLMRouterChain
    """Chain for deciding a destination chain and the input to it."""
    destination_chains: Mapping[str, BaseRetrievalQA]
    """Map of name to candidate chains that inputs can be routed to."""
    default_chain: Chain
    """Default chain to use when router doesn't map input to one of the destinations."""

    @property
    def output_keys(self) -> List[str]:
        return ["result"]

    @classmethod
    def from_retrievers(
        cls,
        llm: BaseLanguageModel,
        retriever_infos: List[Dict[str, Any]],
        default_retriever: Optional[BaseRetriever] = None,
        default_prompt: Optional[PromptTemplate] = None,
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiRetrievalQAChain:
        if default_prompt and not default_retriever:
            raise ValueError(
                "`default_retriever` must be specified if `default_prompt` is "
                "provided. Received only `default_prompt`."
            )
        destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(next_inputs_inner_key="query"),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for r_info in retriever_infos:
            prompt = r_info.get("prompt")
            retriever = r_info["retriever"]
            chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
            name = r_info["name"]
            destination_chains[name] = chain
        if default_chain:
            _default_chain = default_chain
        elif default_retriever:
            _default_chain = RetrievalQA.from_llm(
                llm, prompt=default_prompt, retriever=default_retriever
            )
        else:
            prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["history", "query"]
            )
            _default_chain = ConversationChain(
                llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result"
            )
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
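A hedged sketch of from_retrievers; `sotu_retriever` and `pg_retriever` are placeholder BaseRetriever instances built elsewhere, and the names and descriptions are invented:

from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
from langchain.chat_models import ChatOpenAI

retriever_infos = [
    {
        "name": "state of the union",
        "description": "Good for questions about the State of the Union address",
        "retriever": sotu_retriever,  # placeholder BaseRetriever
    },
    {
        "name": "pg essays",
        "description": "Good for questions about Paul Graham's essays",
        "retriever": pg_retriever,  # placeholder BaseRetriever
    },
]
chain = MultiRetrievalQAChain.from_retrievers(ChatOpenAI(), retriever_infos)

# With no default_retriever or default_chain given, misses fall back to a
# ChatOpenAI ConversationChain keyed on "query"/"result", as built above.
print(chain.run("What was said about the economy?"))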
Source code for langchain.chains.router.base

"""Base classes for chain routing."""
from __future__ import annotations

from abc import ABC
from typing import Any, Dict, List, Mapping, NamedTuple, Optional

from pydantic import Extra

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain.chains.base import Chain


class Route(NamedTuple):
    destination: Optional[str]
    next_inputs: Dict[str, Any]


class RouterChain(Chain, ABC):
    """Chain that outputs the name of a destination chain and the inputs to it."""

    @property
    def output_keys(self) -> List[str]:
        return ["destination", "next_inputs"]

    def route(self, inputs: Dict[str, Any], callbacks: Callbacks = None) -> Route:
        result = self(inputs, callbacks=callbacks)
        return Route(result["destination"], result["next_inputs"])

    async def aroute(
        self, inputs: Dict[str, Any], callbacks: Callbacks = None
    ) -> Route:
        result = await self.acall(inputs, callbacks=callbacks)
        return Route(result["destination"], result["next_inputs"])


class MultiRouteChain(Chain):
    """Use a single chain to route an input to one of multiple candidate chains."""

    router_chain: RouterChain
    """Chain that routes inputs to destination chains."""
    destination_chains: Mapping[str, Chain]
    """Chains that return final answer to inputs."""
    default_chain: Chain
    """Default chain to use when none of the destination chains are suitable."""
    silent_errors: bool = False
    """If True, use default_chain when an invalid destination name is provided.
    Defaults to False."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the router chain prompt expects.

        :meta private:
        """
        return self.router_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return []

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        route = self.router_chain.route(inputs, callbacks=callbacks)
        _run_manager.on_text(
            str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
        )
        if not route.destination:
            return self.default_chain(route.next_inputs, callbacks=callbacks)
        elif route.destination in self.destination_chains:
            return self.destination_chains[route.destination](
                route.next_inputs, callbacks=callbacks
            )
        elif self.silent_errors:
            return self.default_chain(route.next_inputs, callbacks=callbacks)
        else:
            raise ValueError(
                f"Received invalid destination chain name '{route.destination}'"
            )

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        route = await self.router_chain.aroute(inputs, callbacks=callbacks)
        await _run_manager.on_text(
            str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
        )
        if not route.destination:
            return await self.default_chain.acall(
                route.next_inputs, callbacks=callbacks
            )
        elif route.destination in self.destination_chains:
            return await self.destination_chains[route.destination].acall(
                route.next_inputs, callbacks=callbacks
            )
        elif self.silent_errors:
            return await self.default_chain.acall(
                route.next_inputs, callbacks=callbacks
            )
        else:
            raise ValueError(
                f"Received invalid destination chain name '{route.destination}'"
            )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
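RouterChain only fixes the output contract (destination plus next_inputs); a subclass just supplies input_keys and _call. A minimal custom router sketch, entirely invented for illustration: it routes to "math" when the input contains a digit, and otherwise reports no destination so MultiRouteChain falls back to default_chain:

from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.router.base import RouterChain


class KeywordRouterChain(RouterChain):
    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        # A None destination triggers the default_chain branch in MultiRouteChain.
        destination = "math" if any(c.isdigit() for c in inputs["input"]) else None
        return {"destination": destination, "next_inputs": inputs}


router = KeywordRouterChain()
print(router.route({"input": "what is 2 + 2?"}))
# -> Route(destination='math', next_inputs={'input': 'what is 2 + 2?'})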
Source code for langchain.chains.router.llm_router

"""Base classes for LLM-powered router chains."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Type, cast

from pydantic import root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains import LLMChain
from langchain.chains.router.base import RouterChain
from langchain.output_parsers.json import parse_and_check_json_markdown
from langchain.prompts import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException


class LLMRouterChain(RouterChain):
    """A router chain that uses an LLM chain to perform routing."""

    llm_chain: LLMChain
    """LLM chain used to perform routing"""

    @root_validator()
    def validate_prompt(cls, values: dict) -> dict:
        prompt = values["llm_chain"].prompt
        if prompt.output_parser is None:
            raise ValueError(
                "LLMRouterChain requires base llm_chain prompt to have an output"
                " parser that converts LLM text output to a dictionary with keys"
                " 'destination' and 'next_inputs'. Received a prompt with no output"
                " parser."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the LLM chain prompt expects.

        :meta private:
        """
        return self.llm_chain.input_keys

    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
        super()._validate_outputs(outputs)
        if not isinstance(outputs["next_inputs"], dict):
            raise ValueError

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        output = cast(
            Dict[str, Any],
            self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs),
        )
        return output

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        output = cast(
            Dict[str, Any],
            await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs),
        )
        return output

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any
    ) -> LLMRouterChain:
        """Convenience constructor."""
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)


class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
    """Parser for output of router chain in the multi-prompt chain."""

    default_destination: str = "DEFAULT"
    next_inputs_type: Type = str
    next_inputs_inner_key: str = "input"

    def parse(self, text: str) -> Dict[str, Any]:
        try:
            expected_keys = ["destination", "next_inputs"]
            parsed = parse_and_check_json_markdown(text, expected_keys)
            if not isinstance(parsed["destination"], str):
                raise ValueError("Expected 'destination' to be a string.")
            if not isinstance(parsed["next_inputs"], self.next_inputs_type):
                raise ValueError(
                    f"Expected 'next_inputs' to be {self.next_inputs_type}."
                )
            parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
            if (
                parsed["destination"].strip().lower()
                == self.default_destination.lower()
            ):
                parsed["destination"] = None
            else:
                parsed["destination"] = parsed["destination"].strip()
            return parsed
        except Exception as e:
            raise OutputParserException(
                f"Parsing text\n{text}\n raised following error:\n{e}"
            )

https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
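A minimal sketch (invented sample strings) of what RouterOutputParser does to the LLM's fenced JSON: it wraps next_inputs under the inner key and maps the DEFAULT sentinel to None so MultiRouteChain falls back to its default chain:

from langchain.chains.router.llm_router import RouterOutputParser

parser = RouterOutputParser()

text = '```json\n{"destination": "physics", "next_inputs": "Why is the sky blue?"}\n```'
print(parser.parse(text))
# -> {'destination': 'physics', 'next_inputs': {'input': 'Why is the sky blue?'}}

text = '```json\n{"destination": "DEFAULT", "next_inputs": "hi"}\n```'
print(parser.parse(text)["destination"])
# -> None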
Source code for langchain.chains.llm_bash.prompt

# flake8: noqa
from __future__ import annotations

import re
from typing import List

from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException

_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:

Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"

I need to take the following actions:
- List all files in the directory
- Create a new directory
- Copy the files from the first directory into the second directory

```bash
ls
mkdir myNewDirectory
cp -r target/* myNewDirectory
```

That is the format. Begin!

Question: {question}"""


class BashOutputParser(BaseOutputParser):
    """Parser for bash output."""

    def parse(self, text: str) -> List[str]:
        if "```bash" in text:
            return self.get_code_blocks(text)
        else:
            raise OutputParserException(
                f"Failed to parse bash output. Got: {text}",
            )

    @staticmethod
    def get_code_blocks(t: str) -> List[str]:
        """Get multiple code blocks from the LLM result."""
        code_blocks: List[str] = []
        # Bash markdown code blocks
        pattern = re.compile(r"```bash(.*?)(?:\n\s*)```", re.DOTALL)
        for match in pattern.finditer(t):
            matched = match.group(1).strip()
            if matched:
                code_blocks.extend(
                    [line for line in matched.split("\n") if line.strip()]
                )
        return code_blocks

    @property
    def _type(self) -> str:
        return "bash"


PROMPT = PromptTemplate(
    input_variables=["question"],
    template=_PROMPT_TEMPLATE,
    output_parser=BashOutputParser(),
)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/prompt.html
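A small sketch (invented LLM output) showing the parser splitting a fenced ```bash block into a list of non-empty command lines:

from langchain.chains.llm_bash.prompt import BashOutputParser

parser = BashOutputParser()
llm_text = "I need to list files.\n\n```bash\nls\nmkdir myNewDirectory\n```\n"
print(parser.parse(llm_text))
# -> ['ls', 'mkdir myNewDirectory']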
Source code for langchain.chains.llm_bash.base

"""Chain that interprets a prompt and executes bash code to perform bash operations."""
from __future__ import annotations

import logging
import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import OutputParserException
from langchain.utilities.bash import BashProcess

logger = logging.getLogger(__name__)


class LLMBashChain(Chain):
    """Chain that interprets a prompt and executes bash code to perform bash operations.

    Example:
        .. code-block:: python

            from langchain import LLMBashChain, OpenAI

            llm_bash = LLMBashChain.from_llm(OpenAI())
    """

    llm_chain: LLMChain
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated] LLM wrapper to use."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:
    prompt: BasePromptTemplate = PROMPT
    """[Deprecated]"""
    bash_process: BashProcess = Field(default_factory=BashProcess)  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an LLMBashChain with an llm is deprecated. "
                "Please instantiate with llm_chain or using the from_llm class method."
            )
            if "llm_chain" not in values and values["llm"] is not None:
                prompt = values.get("prompt", PROMPT)
                values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
        return values

    @root_validator
    def validate_prompt(cls, values: Dict) -> Dict:
        if values["llm_chain"].prompt.output_parser is None:
            raise ValueError(
                "The prompt used by llm_chain is expected to have an output_parser."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        _run_manager.on_text(inputs[self.input_key], verbose=self.verbose)

        t = self.llm_chain.predict(
            question=inputs[self.input_key], callbacks=_run_manager.get_child()
        )
        _run_manager.on_text(t, color="green", verbose=self.verbose)
        t = t.strip()
        try:
            parser = self.llm_chain.prompt.output_parser
            command_list = parser.parse(t)  # type: ignore[union-attr]
        except OutputParserException as e:
            _run_manager.on_chain_error(e, verbose=self.verbose)
            raise e

        if self.verbose:
            _run_manager.on_text("\nCode: ", verbose=self.verbose)
            _run_manager.on_text(
                str(command_list), color="yellow", verbose=self.verbose
            )
        output = self.bash_process.run(command_list)
        _run_manager.on_text("\nAnswer: ", verbose=self.verbose)
        _run_manager.on_text(output, color="yellow", verbose=self.verbose)
        return {self.output_key: output}

    @property
    def _chain_type(self) -> str:
        return "llm_bash_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: BasePromptTemplate = PROMPT,
        **kwargs: Any,
    ) -> LLMBashChain:
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
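A hedged end-to-end sketch of the chain above: the LLM answers with a ```bash block, BashOutputParser turns it into a command list, and BashProcess executes it and returns the captured output. Note this runs model-generated shell commands on the host, so it should only be tried in a sandbox:

from langchain import LLMBashChain, OpenAI

llm_bash = LLMBashChain.from_llm(OpenAI(temperature=0), verbose=True)
text = "Please write a bash script that prints 'Hello World' to the console."
print(llm_bash.run(text))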
Source code for langchain.chains.natbot.crawler

# flake8: noqa
import time
from sys import platform
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    TypedDict,
    Union,
)

if TYPE_CHECKING:
    from playwright.sync_api import Browser, CDPSession, Page, sync_playwright

black_listed_elements: Set[str] = {
    "html",
    "head",
    "title",
    "meta",
    "iframe",
    "body",
    "script",
    "style",
    "path",
    "svg",
    "br",
    "::marker",
}


class ElementInViewPort(TypedDict):
    """A typed dictionary containing information about elements in the viewport."""

    node_index: str
    backend_node_id: int
    node_name: Optional[str]
    node_value: Optional[str]
    node_meta: List[str]
    is_clickable: bool
    origin_x: int
    origin_y: int
    center_x: int
    center_y: int


class Crawler:
    def __init__(self) -> None:
        try:
            from playwright.sync_api import sync_playwright
        except ImportError:
            raise ImportError(
                "Could not import playwright python package. "
                "Please install it with `pip install playwright`."
            )
        self.browser: Browser = (
            sync_playwright().start().chromium.launch(headless=False)
        )
        self.page: Page = self.browser.new_page()
        self.page.set_viewport_size({"width": 1280, "height": 1080})
        self.page_element_buffer: Dict[int, ElementInViewPort]
        self.client: CDPSession

    def go_to_page(self, url: str) -> None:
        self.page.goto(url=url if "://" in url else "http://" + url)
        self.client = self.page.context.new_cdp_session(self.page)
        self.page_element_buffer = {}

    def scroll(self, direction: str) -> None:
        if direction == "up":
            self.page.evaluate(
                "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
            )
        elif direction == "down":
            self.page.evaluate(
                "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
            )

    def click(self, id: Union[str, int]) -> None:
        # Inject javascript into the page which removes the target= attribute from all links
        js = """
        links = document.getElementsByTagName("a");
        for (var i = 0; i < links.length; i++) {
            links[i].removeAttribute("target");
        }
        """
        self.page.evaluate(js)

        element = self.page_element_buffer.get(int(id))
        if element:
            x: float = element["center_x"]
            y: float = element["center_y"]
            self.page.mouse.click(x, y)
        else:
            print("Could not find element")

    def type(self, id: Union[str, int], text: str) -> None:
        self.click(id)
        self.page.keyboard.type(text)

    def enter(self) -> None:
        self.page.keyboard.press("Enter")

    def crawl(self) -> List[str]:
        page = self.page
        page_element_buffer = self.page_element_buffer
        start = time.time()

        page_state_as_text = []

        device_pixel_ratio: float = page.evaluate("window.devicePixelRatio")
        if platform == "darwin" and device_pixel_ratio == 1:  # lies
            device_pixel_ratio = 2

        win_upper_bound: float = page.evaluate("window.pageYOffset")
        win_left_bound: float = page.evaluate("window.pageXOffset")
        win_width: float = page.evaluate("window.screen.width")
        win_height: float = page.evaluate("window.screen.height")
        win_right_bound: float = win_left_bound + win_width
        win_lower_bound: float = win_upper_bound + win_height

        # percentage_progress_start = (win_upper_bound / document_scroll_height) * 100
        # percentage_progress_end = (
        #     (win_height + win_upper_bound) / document_scroll_height
        # ) * 100
        percentage_progress_start = 1
        percentage_progress_end = 2

        page_state_as_text.append(
            {
                "x": 0,
                "y": 0,
                "text": "[scrollbar {:0.2f}-{:0.2f}%]".format(
                    round(percentage_progress_start, 2), round(percentage_progress_end)
                ),
            }
        )

        tree = self.client.send(
            "DOMSnapshot.captureSnapshot",
            {"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
        )
        strings: Dict[int, str] = tree["strings"]
        document: Dict[str, Any] = tree["documents"][0]
        nodes: Dict[str, Any] = document["nodes"]
        backend_node_id: Dict[int, int] = nodes["backendNodeId"]
        attributes: Dict[int, Dict[int, Any]] = nodes["attributes"]
        node_value: Dict[int, int] = nodes["nodeValue"]
        parent: Dict[int, int] = nodes["parentIndex"]
        node_names: Dict[int, int] = nodes["nodeName"]
        is_clickable: Set[int] = set(nodes["isClickable"]["index"])

        input_value: Dict[str, Any] = nodes["inputValue"]
        input_value_index: List[int] = input_value["index"]
        input_value_values: List[int] = input_value["value"]

        layout: Dict[str, Any] = document["layout"]
        layout_node_index: List[int] = layout["nodeIndex"]
        bounds: Dict[int, List[float]] = layout["bounds"]

        cursor: int = 0

        child_nodes: Dict[str, List[Dict[str, Any]]] = {}
        elements_in_view_port: List[ElementInViewPort] = []

        anchor_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)}
        button_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)}

        def convert_name(
            node_name: Optional[str], has_click_handler: Optional[bool]
        ) -> str:
            if node_name == "a":
                return "link"
            if node_name == "input":
                return "input"
            if node_name == "img":
                return "img"
            if (
                node_name == "button" or has_click_handler
            ):  # found pages that needed this quirk
                return "button"
            else:
                return "text"

        def find_attributes(
            attributes: Dict[int, Any], keys: List[str]
        ) -> Dict[str, str]:
            values = {}

            for [key_index, value_index] in zip(*(iter(attributes),) * 2):
                if value_index < 0:
                    continue
                key = strings[key_index]
                value = strings[value_index]
                if key in keys:
                    values[key] = value
                    keys.remove(key)
                    if not keys:
                        return values

            return values

        def add_to_hash_tree(
            hash_tree: Dict[str, Tuple[bool, Optional[int]]],
            tag: str,
            node_id: int,
            node_name: Optional[str],
            parent_id: int,
        ) -> Tuple[bool, Optional[int]]:
            parent_id_str = str(parent_id)
            if not parent_id_str in hash_tree:
                parent_name = strings[node_names[parent_id]].lower()
                grand_parent_id = parent[parent_id]

                add_to_hash_tree(
                    hash_tree, tag, parent_id, parent_name, grand_parent_id
                )

            is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]

            # even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self
            if node_name == tag:
                value: Tuple[bool, Optional[int]] = (True, node_id)
            elif (
                is_parent_desc_anchor
            ):  # reuse the parent's anchor_id (which could be much higher in the tree)
                value = (True, anchor_id)
            else:
                value = (
                    False,
                    None,
                )  # not a descendant of an anchor, most likely it will become text, an interactive element or discarded

            hash_tree[str(node_id)] = value

            return value

        for index, node_name_index in enumerate(node_names):
            node_parent = parent[index]
            node_name: Optional[str] = strings[node_name_index].lower()

            is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
                anchor_ancestry, "a", index, node_name, node_parent
            )

            is_ancestor_of_button, button_id = add_to_hash_tree(
                button_ancestry, "button", index, node_name, node_parent
            )

            try:
                cursor = layout_node_index.index(
                    index
                )  # todo replace this with proper cursoring, ignoring the fact this is O(n^2) for the moment
            except:
                continue

            if node_name in black_listed_elements:
                continue

            [x, y, width, height] = bounds[cursor]
            x /= device_pixel_ratio
            y /= device_pixel_ratio
            width /= device_pixel_ratio
            height /= device_pixel_ratio

            elem_left_bound = x
            elem_top_bound = y
            elem_right_bound = x + width
            elem_lower_bound = y + height

            partially_is_in_viewport = (
                elem_left_bound < win_right_bound
                and elem_right_bound >= win_left_bound
                and elem_top_bound < win_lower_bound
                and elem_lower_bound >= win_upper_bound
            )

            if not partially_is_in_viewport:
                continue

            meta_data: List[str] = []

            # inefficient to grab the same set of keys for kinds of objects, but it's fine for now
            element_attributes = find_attributes(
                attributes[index], ["type", "placeholder", "aria-label", "title", "alt"]
            )

            ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
            ancestor_node_key = (
                None
                if not ancestor_exception
                else str(anchor_id)
                if is_ancestor_of_anchor
                else str(button_id)
            )
            ancestor_node = (
                None
                if not ancestor_exception
                else child_nodes.setdefault(str(ancestor_node_key), [])
            )

            if node_name == "#text" and ancestor_exception and ancestor_node:
                text = strings[node_value[index]]
                if text == "|" or text == "•":
                    continue
                ancestor_node.append({"type": "type", "value": text})
            else:
                if (
                    node_name == "input" and element_attributes.get("type") == "submit"
                ) or node_name == "button":
                    node_name = "button"
                    element_attributes.pop(
                        "type", None
                    )  # prevent [button ... (button)..]

                for key in element_attributes:
                    if ancestor_exception and ancestor_node:
                        ancestor_node.append(
                            {
                                "type": "attribute",
                                "key": key,
                                "value": element_attributes[key],
                            }
                        )
                    else:
                        meta_data.append(element_attributes[key])

            element_node_value = None

            if node_value[index] >= 0:
                element_node_value = strings[node_value[index]]
                if (
                    element_node_value == "|"
                ):  # commonly used as a separator, does not add much context - lets save ourselves some token space
                    continue
            elif (
                node_name == "input"
                and index in input_value_index
                and element_node_value is None
            ):
                node_input_text_index = input_value_index.index(index)
                text_index = input_value_values[node_input_text_index]
                if node_input_text_index >= 0 and text_index >= 0:
                    element_node_value = strings[text_index]

            # remove redundant elements
            if ancestor_exception and (node_name != "a" and node_name != "button"):
                continue

            elements_in_view_port.append(
                {
                    "node_index": str(index),
                    "backend_node_id": backend_node_id[index],
                    "node_name": node_name,
                    "node_value": element_node_value,
                    "node_meta": meta_data,
                    "is_clickable": index in is_clickable,
                    "origin_x": int(x),
                    "origin_y": int(y),
                    "center_x": int(x + (width / 2)),
                    "center_y": int(y + (height / 2)),
                }
            )

        # let's filter further to remove anything that does not hold any text nor has click handlers + merge text from leaf#text nodes with the parent
        elements_of_interest = []
        id_counter = 0

        for element in elements_in_view_port:
            node_index = element.get("node_index")
            node_name = element.get("node_name")
            element_node_value = element.get("node_value")
            node_is_clickable = element.get("is_clickable")
            node_meta_data: Optional[List[str]] = element.get("node_meta")

            inner_text = f"{element_node_value} " if element_node_value else ""
            meta = ""

            if node_index in child_nodes:
                for child in child_nodes[node_index]:
                    entry_type = child.get("type")
                    entry_value = child.get("value")

                    if entry_type == "attribute" and node_meta_data:
                        entry_key = child.get("key")
                        node_meta_data.append(f'{entry_key}="{entry_value}"')
                    else:
                        inner_text += f"{entry_value} "

            if node_meta_data:
                meta_string = " ".join(node_meta_data)
                meta = f" {meta_string}"

            if inner_text != "":
                inner_text = f"{inner_text.strip()}"

            converted_node_name = convert_name(node_name, node_is_clickable)

            # not very elegant, more like a placeholder
            if (
                (converted_node_name != "button" or meta == "")
                and converted_node_name != "link"
                and converted_node_name != "input"
                and converted_node_name != "img"
                and converted_node_name != "textarea"
            ) and inner_text.strip() == "":
                continue

            page_element_buffer[id_counter] = element

            if inner_text != "":
                elements_of_interest.append(
                    f"""<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>"""
                )
            else:
                elements_of_interest.append(
                    f"""<{converted_node_name} id={id_counter}{meta}/>"""
                )
            id_counter += 1

        print("Parsing time: {:0.2f} seconds".format(time.time() - start))
        return elements_of_interest

https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
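A hedged driver sketch for the Crawler; it needs `pip install playwright` plus `playwright install chromium`, opens a visible (non-headless) browser per __init__ above, and the element id used with type() is an assumption about what crawl() happens to number on the page:

from langchain.chains.natbot.crawler import Crawler

crawler = Crawler()
crawler.go_to_page("google.com")  # "http://" is prepended when no scheme is given

# Each entry is a compact tag like <input id=3 q> or <button id=7>Search</button>.
for element in crawler.crawl():
    print(element)

crawler.type(3, "langchain")  # assumes element 3 turned out to be the search box
crawler.enter()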
217de4f6fe1d-0
Source code for langchain.chains.natbot.base """Implement an LLM driven browser.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.natbot.prompt import PROMPT from langchain.llms.openai import OpenAI [docs]class NatBotChain(Chain): """Implement an LLM driven browser. Example: .. code-block:: python from langchain import NatBotChain natbot = NatBotChain.from_default("Buy me a new hat.") """ llm_chain: LLMChain objective: str """Objective that NatBot is tasked with completing.""" llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" input_url_key: str = "url" #: :meta private: input_browser_content_key: str = "browser_content" #: :meta private: previous_command: str = "" #: :meta private: output_key: str = "command" #: :meta private: [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True [docs] @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an NatBotChain with an llm is deprecated. "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
217de4f6fe1d-1
"Directly instantiating an NatBotChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm=values["llm"], prompt=PROMPT) return values [docs] @classmethod def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: """Load with default LLMChain.""" llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50) return cls.from_llm(llm, objective, **kwargs) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, objective: str, **kwargs: Any ) -> NatBotChain: """Load from LLM.""" llm_chain = LLMChain(llm=llm, prompt=PROMPT) return cls(llm_chain=llm_chain, objective=objective, **kwargs) @property def input_keys(self) -> List[str]: """Expect url and browser content. :meta private: """ return [self.input_url_key, self.input_browser_content_key] @property def output_keys(self) -> List[str]: """Return command. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
217de4f6fe1d-2
) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() url = inputs[self.input_url_key] browser_content = inputs[self.input_browser_content_key] llm_cmd = self.llm_chain.predict( objective=self.objective, url=url[:100], previous_command=self.previous_command, browser_content=browser_content[:4500], callbacks=_run_manager.get_child(), ) llm_cmd = llm_cmd.strip() self.previous_command = llm_cmd return {self.output_key: llm_cmd} [docs] def execute(self, url: str, browser_content: str) -> str: """Figure out next browser command to run. Args: url: URL of the site currently on. browser_content: Content of the page as currently displayed by the browser. Returns: Next browser command to run. Example: .. code-block:: python browser_content = "...." llm_command = natbot.run("www.google.com", browser_content) """ _inputs = { self.input_url_key: url, self.input_browser_content_key: browser_content, } return self(_inputs)[self.output_key] @property def _chain_type(self) -> str: return "nat_bot_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
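A hedged driving-loop sketch for NatBotChain. The `fetch_browser_content` helper is hypothetical, standing in for a real browser integration such as the crawler module above; an `OPENAI_API_KEY` is assumed in the environment since `from_default` constructs an OpenAI LLM:

.. code-block:: python

    from langchain.chains.natbot.base import NatBotChain

    def fetch_browser_content(url: str) -> str:
        # Hypothetical stub; a real integration would return the simplified
        # DOM produced by the natbot crawler for the current page.
        return '<link id=0>Hats</link> <input id=1 alt="Search"/>'

    natbot = NatBotChain.from_default("Buy me a new hat.")
    url = "https://www.example-shop.com"
    for _ in range(3):
        command = natbot.execute(url, fetch_browser_content(url))
        print(command)  # e.g. TYPESUBMIT 1 "hat"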
37c4da09001c-0
Source code for langchain.chains.question_answering.__init__ """Load question answering chains.""" from typing import Any, Mapping, Optional, Protocol from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.question_answering import ( map_reduce_prompt, refine_prompts, stuff_prompt, ) from langchain.chains.question_answering.map_rerank_prompt import ( PROMPT as MAP_RERANK_PROMPT, ) from langchain.prompts.base import BasePromptTemplate [docs]class LoadingCallable(Protocol): """Interface for loading the combine documents chain.""" [docs] def __call__( self, llm: BaseLanguageModel, **kwargs: Any ) -> BaseCombineDocumentsChain: """Callable to load the combine documents chain.""" def _load_map_rerank_chain( llm: BaseLanguageModel, prompt: BasePromptTemplate = MAP_RERANK_PROMPT, verbose: bool = False, document_variable_name: str = "context", rank_key: str = "score", answer_key: str = "answer", callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-1
callbacks: Callbacks = None, **kwargs: Any, ) -> MapRerankDocumentsChain: llm_chain = LLMChain( llm=llm, prompt=prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) return MapRerankDocumentsChain( llm_chain=llm_chain, rank_key=rank_key, answer_key=answer_key, document_variable_name=document_variable_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) def _load_stuff_chain( llm: BaseLanguageModel, prompt: Optional[BasePromptTemplate] = None, document_variable_name: str = "context", verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> StuffDocumentsChain: _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain( llm=llm, prompt=_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) # TODO: document prompt return StuffDocumentsChain( llm_chain=llm_chain, document_variable_name=document_variable_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) def _load_map_reduce_chain( llm: BaseLanguageModel, question_prompt: Optional[BasePromptTemplate] = None, combine_prompt: Optional[BasePromptTemplate] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-2
combine_prompt: Optional[BasePromptTemplate] = None, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", collapse_prompt: Optional[BasePromptTemplate] = None, reduce_llm: Optional[BaseLanguageModel] = None, collapse_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> MapReduceDocumentsChain: _question_prompt = ( question_prompt or map_reduce_prompt.QUESTION_PROMPT_SELECTOR.get_prompt(llm) ) _combine_prompt = ( combine_prompt or map_reduce_prompt.COMBINE_PROMPT_SELECTOR.get_prompt(llm) ) map_chain = LLMChain( llm=llm, prompt=_question_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) _reduce_llm = reduce_llm or llm reduce_chain = LLMChain( llm=_reduce_llm, prompt=_combine_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) # TODO: document prompt combine_document_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) if collapse_prompt is None: collapse_chain = None if collapse_llm is not None: raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-3
if collapse_llm is not None: raise ValueError( "collapse_llm provided, but collapse_prompt was not: please " "provide one or stop providing collapse_llm." ) else: _collapse_llm = collapse_llm or llm collapse_chain = StuffDocumentsChain( llm_chain=LLMChain( llm=_collapse_llm, prompt=collapse_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ), document_variable_name=combine_document_variable_name, verbose=verbose, callback_manager=callback_manager, ) return MapReduceDocumentsChain( llm_chain=map_chain, combine_document_chain=combine_document_chain, document_variable_name=map_reduce_document_variable_name, collapse_document_chain=collapse_chain, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, **kwargs, ) def _load_refine_chain( llm: BaseLanguageModel, question_prompt: Optional[BasePromptTemplate] = None, refine_prompt: Optional[BasePromptTemplate] = None, document_variable_name: str = "context_str", initial_response_name: str = "existing_answer", refine_llm: Optional[BaseLanguageModel] = None, verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> RefineDocumentsChain: _question_prompt = ( question_prompt or refine_prompts.QUESTION_PROMPT_SELECTOR.get_prompt(llm) )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-4
) _refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt( llm ) initial_chain = LLMChain( llm=llm, prompt=_question_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) _refine_llm = refine_llm or llm refine_chain = LLMChain( llm=_refine_llm, prompt=_refine_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) return RefineDocumentsChain( initial_llm_chain=initial_chain, refine_llm_chain=refine_chain, document_variable_name=document_variable_name, initial_response_name=initial_response_name, verbose=verbose, callback_manager=callback_manager, **kwargs, ) [docs]def load_qa_chain( llm: BaseLanguageModel, chain_type: str = "stuff", verbose: Optional[bool] = None, callback_manager: Optional[BaseCallbackManager] = None, **kwargs: Any, ) -> BaseCombineDocumentsChain: """Load question answering chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "map_rerank", and "refine". verbose: Whether chains should be run in verbose mode or not. Note that this applies to all chains that make up the final chain. callback_manager: Callback manager to use for the chain. Returns:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-5
callback_manager: Callback manager to use for the chain. Returns: A chain to use for question answering. """ loader_mapping: Mapping[str, LoadingCallable] = { "stuff": _load_stuff_chain, "map_reduce": _load_map_reduce_chain, "refine": _load_refine_chain, "map_rerank": _load_map_rerank_chain, } if chain_type not in loader_mapping: raise ValueError( f"Got unsupported chain type: {chain_type}. " f"Should be one of {loader_mapping.keys()}" ) return loader_mapping[chain_type]( llm, verbose=verbose, callback_manager=callback_manager, **kwargs )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
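A usage sketch for `load_qa_chain`, assuming an `OPENAI_API_KEY` in the environment. The returned combine-documents chain takes `input_documents` plus a `question`:

.. code-block:: python

    from langchain.chains.question_answering import load_qa_chain
    from langchain.llms import OpenAI
    from langchain.schema import Document

    chain = load_qa_chain(OpenAI(temperature=0), chain_type="map_reduce")
    docs = [
        Document(page_content="Tonga is an archipelago of 169 islands."),
        Document(page_content="Its capital is Nuku'alofa."),
    ]
    answer = chain.run(input_documents=docs, question="What is the capital of Tonga?")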
6638c626e013-0
Source code for langchain.chains.sql_database.base """Chain for interacting with SQL Database.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.sql_database import SQLDatabase from langchain.tools.sql_database.prompt import QUERY_CHECKER INTERMEDIATE_STEPS_KEY = "intermediate_steps" [docs]class SQLDatabaseChain(Chain): """Chain for interacting with SQL Database. Example: .. code-block:: python from langchain import SQLDatabaseChain, OpenAI, SQLDatabase db = SQLDatabase(...) db_chain = SQLDatabaseChain.from_llm(OpenAI(), db) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" database: SQLDatabase = Field(exclude=True) """SQL Database to connect to.""" prompt: Optional[BasePromptTemplate] = None """[Deprecated] Prompt to use to translate natural language to SQL.""" top_k: int = 5 """Number of results to return from the query""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_intermediate_steps: bool = False
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-1
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] @root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating a SQLDatabaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-2
"""Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() input_text = f"{inputs[self.input_key]}\nSQLQuery:" _run_manager.on_text(input_text, verbose=self.verbose) # If not present, then defaults to None which is all tables. table_names_to_use = inputs.get("table_names_to_use") table_info = self.database.get_table_info(table_names=table_names_to_use) llm_inputs = { "input": input_text, "top_k": str(self.top_k), "dialect": self.database.dialect, "table_info": table_info, "stop": ["\nSQLResult:"], } intermediate_steps: List = [] try: intermediate_steps.append(llm_inputs) # input: sql generation sql_cmd = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() if not self.use_query_checker: _run_manager.on_text(sql_cmd, color="green", verbose=self.verbose) intermediate_steps.append( sql_cmd ) # output: sql generation (no checker) intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec result = self.database.run(sql_cmd)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-3
result = self.database.run(sql_cmd) intermediate_steps.append(str(result)) # output: sql exec else: query_checker_prompt = self.query_checker_prompt or PromptTemplate( template=QUERY_CHECKER, input_variables=["query", "dialect"] ) query_checker_chain = LLMChain( llm=self.llm_chain.llm, prompt=query_checker_prompt ) query_checker_inputs = { "query": sql_cmd, "dialect": self.database.dialect, } checked_sql_command: str = query_checker_chain.predict( callbacks=_run_manager.get_child(), **query_checker_inputs ).strip() intermediate_steps.append( checked_sql_command ) # output: sql generation (checker) _run_manager.on_text( checked_sql_command, color="green", verbose=self.verbose ) intermediate_steps.append( {"sql_cmd": checked_sql_command} ) # input: sql exec result = self.database.run(checked_sql_command) intermediate_steps.append(str(result)) # output: sql exec sql_cmd = checked_sql_command _run_manager.on_text("\nSQLResult: ", verbose=self.verbose) _run_manager.on_text(result, color="yellow", verbose=self.verbose) # If return direct, we just set the final result equal to # the result of the sql query result, otherwise try to get a human readable # final answer if self.return_direct: final_result = result else: _run_manager.on_text("\nAnswer:", verbose=self.verbose) input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:" llm_inputs["input"] = input_text
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-4
llm_inputs["input"] = input_text intermediate_steps.append(llm_inputs) # input: final answer final_result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() intermediate_steps.append(final_result) # output: final answer _run_manager.on_text(final_result, color="green", verbose=self.verbose) chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result except Exception as exc: # Append intermediate steps to exception, to aid in logging and later # improvement of few shot prompt seeds exc.intermediate_steps = intermediate_steps # type: ignore raise exc @property def _chain_type(self) -> str: return "sql_database_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, db: SQLDatabase, prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> SQLDatabaseChain: prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, database=db, **kwargs) [docs]class SQLDatabaseSequentialChain(Chain): """Chain for querying SQL database that is a sequential chain. The chain is as follows: 1. Based on the query, determine which tables to use. 2. Based on those tables, call the normal SQL database chain.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-5
2. Based on those tables, call the normal SQL database chain. This is useful in cases where the number of tables in the database is large. """ decider_chain: LLMChain sql_chain: SQLDatabaseChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_intermediate_steps: bool = False [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, database: SQLDatabase, query_prompt: BasePromptTemplate = PROMPT, decider_prompt: BasePromptTemplate = DECIDER_PROMPT, **kwargs: Any, ) -> SQLDatabaseSequentialChain: """Load the necessary chains.""" sql_chain = SQLDatabaseChain.from_llm( llm, database, prompt=query_prompt, **kwargs ) decider_chain = LLMChain( llm=llm, prompt=decider_prompt, output_key="table_names" ) return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs) @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _call( self, inputs: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-6
def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _table_names = self.sql_chain.database.get_usable_table_names() table_names = ", ".join(_table_names) llm_inputs = { "query": inputs[self.input_key], "table_names": table_names, } _lowercased_table_names = [name.lower() for name in _table_names] table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs) table_names_to_use = [ name for name in table_names_from_chain if name.lower() in _lowercased_table_names ] _run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose) _run_manager.on_text( str(table_names_to_use), color="yellow", verbose=self.verbose ) new_inputs = { self.sql_chain.input_key: inputs[self.input_key], "table_names_to_use": table_names_to_use, } return self.sql_chain( new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True ) @property def _chain_type(self) -> str: return "sql_database_sequential_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
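A hedged usage sketch for `SQLDatabaseChain`, assuming a local SQLite file (here the sample `Chinook.db`) and an `OPENAI_API_KEY` in the environment. With `return_intermediate_steps=True`, the output dict also carries the prompt inputs, generated SQL, raw SQL result, and final answer:

.. code-block:: python

    from langchain import OpenAI, SQLDatabase
    from langchain.chains.sql_database.base import SQLDatabaseChain

    db = SQLDatabase.from_uri("sqlite:///Chinook.db")
    db_chain = SQLDatabaseChain.from_llm(
        OpenAI(temperature=0),
        db,
        use_query_checker=True,
        return_intermediate_steps=True,
    )
    result = db_chain("How many employees are there?")
    print(result["result"])
    print(result["intermediate_steps"])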
c73402a76270-0
Source code for langchain.chains.flare.prompts from typing import Tuple from langchain.prompts import PromptTemplate from langchain.schema import BaseOutputParser [docs]class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]): finished_value: str = "FINISHED" [docs] def parse(self, text: str) -> Tuple[str, bool]: cleaned = text.strip() finished = self.finished_value in cleaned return cleaned.replace(self.finished_value, ""), finished PROMPT_TEMPLATE = """\ Respond to the user message using any relevant context. \ If context is provided, you should ground your answer in that context. \ Once you're done responding return FINISHED. >>> CONTEXT: {context} >>> USER INPUT: {user_input} >>> RESPONSE: {response}\ """ PROMPT = PromptTemplate( template=PROMPT_TEMPLATE, input_variables=["user_input", "context", "response"], ) QUESTION_GENERATOR_PROMPT_TEMPLATE = """\ Given a user input and an existing partial response as context, \ ask a question to which the answer is the given term/entity/phrase: >>> USER INPUT: {user_input} >>> EXISTING PARTIAL RESPONSE: {current_response} The question to which the answer is the term/entity/phrase "{uncertain_span}" is:""" QUESTION_GENERATOR_PROMPT = PromptTemplate( template=QUESTION_GENERATOR_PROMPT_TEMPLATE, input_variables=["user_input", "current_response", "uncertain_span"], )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/prompts.html
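`FinishedOutputParser` splits a completion into the response text and a finished flag; the behavior is deterministic and easy to check:

.. code-block:: python

    from langchain.chains.flare.prompts import FinishedOutputParser

    parser = FinishedOutputParser()
    print(parser.parse("Paris is the capital of France. FINISHED"))
    # ('Paris is the capital of France. ', True)
    print(parser.parse("Paris is the capital of"))
    # ('Paris is the capital of', False)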
106956c91fd6-0
Source code for langchain.chains.flare.base from __future__ import annotations import re from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple import numpy as np from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.flare.prompts import ( PROMPT, QUESTION_GENERATOR_PROMPT, FinishedOutputParser, ) from langchain.chains.llm import LLMChain from langchain.llms import OpenAI from langchain.prompts import BasePromptTemplate from langchain.schema import BaseRetriever, Generation class _ResponseChain(LLMChain): prompt: BasePromptTemplate = PROMPT @property def input_keys(self) -> List[str]: return self.prompt.input_variables def generate_tokens_and_log_probs( self, _input: Dict[str, Any], *, run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Tuple[Sequence[str], Sequence[float]]: llm_result = self.generate([_input], run_manager=run_manager) return self._extract_tokens_and_log_probs(llm_result.generations[0]) @abstractmethod def _extract_tokens_and_log_probs( self, generations: List[Generation] ) -> Tuple[Sequence[str], Sequence[float]]: """Extract tokens and log probs from response.""" class _OpenAIResponseChain(_ResponseChain): llm: OpenAI = Field( default_factory=lambda: OpenAI( max_tokens=32, model_kwargs={"logprobs": 1}, temperature=0 )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-1
) ) def _extract_tokens_and_log_probs( self, generations: List[Generation] ) -> Tuple[Sequence[str], Sequence[float]]: tokens = [] log_probs = [] for gen in generations: if gen.generation_info is None: raise ValueError tokens.extend(gen.generation_info["logprobs"]["tokens"]) log_probs.extend(gen.generation_info["logprobs"]["token_logprobs"]) return tokens, log_probs [docs]class QuestionGeneratorChain(LLMChain): prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT @property def input_keys(self) -> List[str]: return ["user_input", "context", "response"] def _low_confidence_spans( tokens: Sequence[str], log_probs: Sequence[float], min_prob: float, min_token_gap: int, num_pad_tokens: int, ) -> List[str]: _low_idx = np.where(np.exp(log_probs) < min_prob)[0] low_idx = [i for i in _low_idx if re.search(r"\w", tokens[i])] if len(low_idx) == 0: return [] spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]] for i, idx in enumerate(low_idx[1:]): end = idx + num_pad_tokens + 1 if idx - low_idx[i] < min_token_gap: spans[-1][1] = end else: spans.append([idx, end]) return ["".join(tokens[start:end]) for start, end in spans] [docs]class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
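`_low_confidence_spans` (a private helper, imported here only for illustration) turns token-level log probabilities into padded spans of low-confidence text, merging spans that fall within `min_token_gap` of each other. With `min_prob=0.2`, the two uncertain tokens below merge into a single span:

.. code-block:: python

    from langchain.chains.flare.base import _low_confidence_spans

    tokens = ["The", " capital", " of", " France", " is", " Paris"]
    log_probs = [-0.1, -0.2, -0.1, -2.5, -0.1, -3.0]  # exp(-2.5) ~ 0.08 < 0.2
    spans = _low_confidence_spans(
        tokens, log_probs, min_prob=0.2, min_token_gap=5, num_pad_tokens=2
    )
    print(spans)  # [' France is Paris']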
106956c91fd6-2
[docs]class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain response_chain: _ResponseChain = Field(default_factory=_OpenAIResponseChain) output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser) retriever: BaseRetriever min_prob: float = 0.2 min_token_gap: int = 5 num_pad_tokens: int = 2 max_iter: int = 10 start_with_retrieval: bool = True @property def input_keys(self) -> List[str]: return ["user_input"] @property def output_keys(self) -> List[str]: return ["response"] def _do_generation( self, questions: List[str], user_input: str, response: str, _run_manager: CallbackManagerForChainRun, ) -> Tuple[str, bool]: callbacks = _run_manager.get_child() docs = [] for question in questions: docs.extend(self.retriever.get_relevant_documents(question)) context = "\n\n".join(d.page_content for d in docs) result = self.response_chain.predict( user_input=user_input, context=context, response=response, callbacks=callbacks, ) marginal, finished = self.output_parser.parse(result) return marginal, finished def _do_retrieval( self, low_confidence_spans: List[str], _run_manager: CallbackManagerForChainRun, user_input: str, response: str, initial_response: str, ) -> Tuple[str, bool]: question_gen_inputs = [ { "user_input": user_input,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-3
question_gen_inputs = [ { "user_input": user_input, "current_response": initial_response, "uncertain_span": span, } for span in low_confidence_spans ] callbacks = _run_manager.get_child() question_gen_outputs = self.question_generator_chain.apply( question_gen_inputs, callbacks=callbacks ) questions = [ output[self.question_generator_chain.output_keys[0]] for output in question_gen_outputs ] _run_manager.on_text( f"Generated Questions: {questions}", color="yellow", end="\n" ) return self._do_generation(questions, user_input, response, _run_manager) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() user_input = inputs[self.input_keys[0]] response = "" for i in range(self.max_iter): _run_manager.on_text( f"Current Response: {response}", color="blue", end="\n" ) _input = {"user_input": user_input, "context": "", "response": response} tokens, log_probs = self.response_chain.generate_tokens_and_log_probs( _input, run_manager=_run_manager ) low_confidence_spans = _low_confidence_spans( tokens, log_probs, self.min_prob, self.min_token_gap, self.num_pad_tokens, ) initial_response = response.strip() + " " + "".join(tokens)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-4
) initial_response = response.strip() + " " + "".join(tokens) if not low_confidence_spans: response = initial_response final_response, finished = self.output_parser.parse(response) if finished: return {self.output_keys[0]: final_response} continue marginal, finished = self._do_retrieval( low_confidence_spans, _run_manager, user_input, response, initial_response, ) response = response.strip() + " " + marginal if finished: break return {self.output_keys[0]: response} [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any ) -> FlareChain: question_gen_chain = QuestionGeneratorChain(llm=llm) response_llm = OpenAI( max_tokens=max_generation_len, model_kwargs={"logprobs": 1}, temperature=0 ) response_chain = _OpenAIResponseChain(llm=response_llm) return cls( question_generator_chain=question_gen_chain, response_chain=response_chain, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
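A hedged usage sketch for `FlareChain.from_llm`, assuming an `OPENAI_API_KEY` and the `faiss` package; the FAISS store over a single toy text stands in for a real retriever. The `retriever` and `min_prob` kwargs pass through `from_llm` into the chain's fields:

.. code-block:: python

    from langchain.chains.flare.base import FlareChain
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS

    store = FAISS.from_texts(
        ["FLARE retrieves extra context whenever token confidence drops."],
        OpenAIEmbeddings(),
    )
    flare = FlareChain.from_llm(
        OpenAI(temperature=0),
        retriever=store.as_retriever(),
        max_generation_len=64,
        min_prob=0.3,
    )
    print(flare.run("How does FLARE decide when to retrieve?"))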
9062a4f7a33f-0
Source code for langchain.embeddings.jina """Wrapper around Jina embedding models.""" import os from typing import Any, Dict, List, Optional import requests from pydantic import BaseModel, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class JinaEmbeddings(BaseModel, Embeddings): client: Any #: :meta private: model_name: str = "ViT-B-32::openai" """Model name to use.""" jina_auth_token: Optional[str] = None jina_api_url: str = "https://api.clip.jina.ai/api/v1/models/" request_headers: Optional[dict] = None [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that auth token exists in environment.""" # Set Auth jina_auth_token = get_from_dict_or_env( values, "jina_auth_token", "JINA_AUTH_TOKEN" ) values["jina_auth_token"] = jina_auth_token values["request_headers"] = (("authorization", jina_auth_token),) # Test that package is installed try: import jina except ImportError: raise ImportError( "Could not import `jina` python package. " "Please install it with `pip install jina`." ) # Setup client jina_api_url = os.environ.get("JINA_API_URL", values["jina_api_url"]) model_name = values["model_name"] try: resp = requests.get( jina_api_url + f"?model_name={model_name}", headers={"Authorization": jina_auth_token}, )
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
9062a4f7a33f-1
headers={"Authorization": jina_auth_token}, ) if resp.status_code == 401: raise ValueError( "The given Jina auth token is invalid. " "Please check your Jina auth token." ) elif resp.status_code == 404: raise ValueError( f"The given model name `{model_name}` is not valid. " f"Please go to https://cloud.jina.ai/user/inference " f"and create a model with the given model name." ) resp.raise_for_status() endpoint = resp.json()["endpoints"]["grpc"] values["client"] = jina.Client(host=endpoint) except requests.exceptions.HTTPError as err: raise ValueError(f"Error: {err!r}") return values def _post(self, docs: List[Any], **kwargs: Any) -> Any: payload = dict(inputs=docs, metadata=self.request_headers, **kwargs) return self.client.post(on="/encode", **payload) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Jina's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ from docarray import Document, DocumentArray embeddings = self._post( docs=DocumentArray([Document(text=t) for t in texts]) ).embeddings return [list(map(float, e)) for e in embeddings] [docs] def embed_query(self, text: str) -> List[float]: """Call out to Jina's embedding endpoint. Args: text: The text to embed. Returns:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
9062a4f7a33f-2
Args: text: The text to embed. Returns: Embeddings for the text. """ from docarray import Document, DocumentArray embedding = self._post(docs=DocumentArray([Document(text=text)])).embeddings[0] return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
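A minimal usage sketch for `JinaEmbeddings`, assuming `JINA_AUTH_TOKEN` is set and the named model is deployed on Jina's inference service:

.. code-block:: python

    from langchain.embeddings import JinaEmbeddings

    embeddings = JinaEmbeddings(model_name="ViT-B-32::openai")
    doc_vectors = embeddings.embed_documents(["hello", "world"])
    query_vector = embeddings.embed_query("a greeting")
    print(len(doc_vectors), len(query_vector))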
23ea29a6e38b-0
Source code for langchain.embeddings.elasticsearch from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain.embeddings.base import Embeddings [docs]class ElasticsearchEmbeddings(Embeddings): """ Wrapper around Elasticsearch embedding models. This class provides an interface to generate embeddings using a model deployed in an Elasticsearch cluster. It requires an Elasticsearch connection object and the model_id of the model deployed in the cluster. In Elasticsearch you need to have an embedding model loaded and deployed. - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, client: MlClient, model_id: str, *, input_field: str = "text_field", ): """ Initialize the ElasticsearchEmbeddings instance. Args: client (MlClient): An Elasticsearch ML client object. model_id (str): The model_id of the model deployed in the Elasticsearch cluster. input_field (str): The name of the key for the input text field in the document. Defaults to 'text_field'. """ self.client = client self.model_id = model_id self.input_field = input_field [docs] @classmethod def from_credentials( cls, model_id: str, *, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-1
es_user: Optional[str] = None,
es_password: Optional[str] = None,
input_field: str = "text_field",
) -> ElasticsearchEmbeddings:
"""Instantiate embeddings from Elasticsearch credentials.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
es_user: (str, optional): Elasticsearch username.
es_password: (str, optional): Elasticsearch password.
Example:
.. code-block:: python
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Credentials can be passed in two ways. Either set the env vars
# ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
# pulled in, or pass them in directly as kwargs.
embeddings = ElasticsearchEmbeddings.from_credentials(
model_id,
input_field=input_field,
# es_cloud_id="foo",
# es_user="bar",
# es_password="baz",
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
try:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
except ImportError:
raise ImportError(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-2
from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") es_user = es_user or get_from_env("es_user", "ES_USER") es_password = es_password or get_from_env("es_password", "ES_PASSWORD") # Connect to Elasticsearch es_connection = Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) client = MlClient(es_connection) return cls(client, model_id, input_field=input_field) [docs] @classmethod def from_es_connection( cls, model_id: str, es_connection: Elasticsearch, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """ Instantiate embeddings from an existing Elasticsearch connection. This method provides a way to create an instance of the ElasticsearchEmbeddings class using an existing Elasticsearch connection. The connection object is used to create an MlClient, which is then used to initialize the ElasticsearchEmbeddings instance. Args: model_id (str): The model_id of the model deployed in the Elasticsearch cluster. es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch connection object. input_field (str, optional): The name of the key for the input text field in the document. Defaults to 'text_field'. Returns: ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class. Example: .. code-block:: python from elasticsearch import Elasticsearch
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-3
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings.embed_documents(documents)
"""
# Importing MlClient from elasticsearch.client within the method to
# avoid unnecessary import if the method is not used
from elasticsearch.client import MlClient
# Create an MlClient from the given Elasticsearch connection
client = MlClient(es_connection)
# Return a new instance of the ElasticsearchEmbeddings class with
# the MlClient, model_id, and input_field
return cls(client, model_id, input_field=input_field)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-4
list. """ response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """ Generate embeddings for a list of documents. Args: texts (List[str]): A list of document text strings to generate embeddings for. Returns: List[List[float]]: A list of embeddings, one for each document in the input list. """ return self._embedding_func(texts) [docs] def embed_query(self, text: str) -> List[float]: """ Generate an embedding for a single query text. Args: text (str): The query text to generate an embedding for. Returns: List[float]: The embedding for the input query text. """ return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
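The embeddings returned by `embed_documents` and `embed_query` are plain lists of floats, so ranking documents against a query is a small numpy exercise. Toy 3-dimensional vectors stand in for real model output here:

.. code-block:: python

    import numpy as np

    def cosine_rank(doc_vectors, query_vector):
        # Rank documents by cosine similarity to the query, best first.
        docs = np.asarray(doc_vectors, dtype=float)
        q = np.asarray(query_vector, dtype=float)
        scores = docs @ q / (np.linalg.norm(docs, axis=1) * np.linalg.norm(q))
        return np.argsort(-scores)

    order = cosine_rank([[0.9, 0.1, 0.0], [0.1, 0.9, 0.0]], [1.0, 0.0, 0.0])
    print(order)  # [0 1]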
4f37bf8d3710-0
Source code for langchain.embeddings.google_palm
"""Wrapper around Google's PaLM Embeddings APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions."""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def embed_with_retry(
embeddings: GooglePalmEmbeddings, *args: Any, **kwargs: Any
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.client.generate_embeddings(*args, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/google_palm.html
4f37bf8d3710-1
return embeddings.client.generate_embeddings(*args, **kwargs) return _embed_with_retry(*args, **kwargs) [docs]class GooglePalmEmbeddings(BaseModel, Embeddings): client: Any google_api_key: Optional[str] model_name: str = "models/embedding-gecko-001" """Model name to use.""" [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ImportError("Could not import google.generativeai python package.") values["client"] = genai return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: return [self.embed_query(text) for text in texts] [docs] def embed_query(self, text: str) -> List[float]: """Embed query text.""" embedding = embed_with_retry(self, self.model_name, text) return embedding["embedding"]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/google_palm.html
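A minimal usage sketch for `GooglePalmEmbeddings`, assuming `GOOGLE_API_KEY` is set and the `google.generativeai` package is installed:

.. code-block:: python

    from langchain.embeddings import GooglePalmEmbeddings

    embeddings = GooglePalmEmbeddings(model_name="models/embedding-gecko-001")
    query_vector = embeddings.embed_query("What is the meaning of life?")
    doc_vectors = embeddings.embed_documents(["first document", "second document"])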
6991ba944b41-0
Source code for langchain.embeddings.dashscope
"""Wrapper around DashScope embedding models."""
from __future__ import annotations
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
)
from pydantic import BaseModel, Extra, root_validator
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
multiplier = 1
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry, starting with
# 1 second, then up to 4 seconds, then 4 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs]def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
resp = embeddings.client.call(**kwargs)
if resp.status_code == 200:
return resp.output["embeddings"]
elif resp.status_code in [400, 401]:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-1
elif resp.status_code in [400, 401]: raise ValueError( f"status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) else: raise HTTPError( f"HTTP error occurred: status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) return _embed_with_retry(**kwargs) [docs]class DashScopeEmbeddings(BaseModel, Embeddings): """Wrapper around DashScope embedding models. To use, you should have the ``dashscope`` python package installed, and the environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import DashScopeEmbeddings embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key") Example: .. code-block:: python import os os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY" from langchain.embeddings.dashscope import DashScopeEmbeddings embeddings = DashScopeEmbeddings( model="text-embedding-v1", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-v1" dashscope_api_key: Optional[str] = None """Maximum number of retries to make when generating.""" max_retries: int = 5 [docs] class Config:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-2
max_retries: int = 5
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exist in environment."""
values["dashscope_api_key"] = get_from_dict_or_env(
values, "dashscope_api_key", "DASHSCOPE_API_KEY"
)
try:
import dashscope
dashscope.api_key = values["dashscope_api_key"]
values["client"] = dashscope.TextEmbedding
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
return values
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(
self, input=texts, text_type="document", model=self.model
)
embedding_list = [item["embedding"] for item in embeddings]
return embedding_list
[docs] def embed_query(self, text: str) -> List[float]:
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-3
Returns: Embedding for the text. """ embedding = embed_with_retry( self, input=text, text_type="query", model=self.model )[0]["embedding"] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
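A minimal usage sketch for `DashScopeEmbeddings`, assuming `DASHSCOPE_API_KEY` is set and the `dashscope` package is installed; `max_retries` bounds the retry decorator shown above:

.. code-block:: python

    from langchain.embeddings.dashscope import DashScopeEmbeddings

    embeddings = DashScopeEmbeddings(model="text-embedding-v1", max_retries=3)
    doc_vectors = embeddings.embed_documents(["first doc", "second doc"])
    query_vector = embeddings.embed_query("a test query")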
5b4532fb538e-0
Source code for langchain.embeddings.embaas """Wrapper around embaas embeddings API.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" [docs]class EmbaasEmbeddingsPayload(TypedDict): """Payload for the embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] [docs]class EmbaasEmbeddings(BaseModel, Embeddings): """Wrapper around embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Initialise with default model and instruction from langchain.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # Initialise with custom model and instruction from langchain.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
5b4532fb538e-1
api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
5b4532fb538e-2
return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] [docs] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
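`embed_documents` above splits its inputs into `MAX_BATCH_SIZE`-sized slices and flattens the per-batch results. The slice logic in isolation, with a toy batch size so the split is visible:

.. code-block:: python

    MAX_BATCH_SIZE = 3  # toy value; the module above uses 256
    texts = ["t1", "t2", "t3", "t4", "t5", "t6", "t7"]
    batches = [
        texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)
    ]
    print(batches)  # [['t1', 't2', 't3'], ['t4', 't5', 't6'], ['t7']]
    flattened = [t for batch in batches for t in batch]
    assert flattened == texts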
6e094b37f8b6-0
Source code for langchain.embeddings.aleph_alpha
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
"""
Wrapper for Aleph Alpha's Asymmetric Embeddings
AA provides you with an endpoint to embed a document and a query.
The models were optimized to make the embeddings of documents and
the query for a document as similar as possible.
To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding
embeddings = AlephAlphaAsymmetricSemanticEmbedding()
document = "This is the content of the document"
query = "What is the content of the document?"
doc_result = embeddings.embed_documents([document])
query_result = embeddings.embed_query(query)
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
hosting: Optional[str] = "https://api.aleph-alpha.com"
"""Optional parameter that specifies which datacenters may process the request."""
normalize: Optional[bool] = True
"""Should returned embeddings be normalized"""
compress_to_size: Optional[int] = 128
"""Should the returned embeddings come back as an original 5120-dim vector,
or should it be compressed to 128-dim."""
contextual_control_threshold: Optional[int] = None
"""Attention control parameters only apply to those tokens that have
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-1
"""Attention control parameters only apply to those tokens that have explicitly been set in the request.""" control_log_additive: Optional[bool] = True """Apply controls on prompt items by adding the log(control_factor) to attention scores.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: from aleph_alpha_client import Client except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) values["client"] = Client(token=aleph_alpha_api_key) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Aleph Alpha's asymmetric Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from aleph_alpha_client import ( Prompt, SemanticEmbeddingRequest, SemanticRepresentation, ) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) document_embeddings = [] for text in texts: document_params = {
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-2
document_embeddings = [] for text in texts: document_params = { "prompt": Prompt.from_text(text), "representation": SemanticRepresentation.Document, "compress_to_size": self.compress_to_size, "normalize": self.normalize, "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, } document_request = SemanticEmbeddingRequest(**document_params) document_response = self.client.semantic_embed( request=document_request, model=self.model ) document_embeddings.append(document_response.embedding) return document_embeddings [docs] def embed_query(self, text: str) -> List[float]: """Call out to Aleph Alpha's asymmetric, query embedding endpoint Args: text: The text to embed. Returns: Embeddings for the text. """ try: from aleph_alpha_client import ( Prompt, SemanticEmbeddingRequest, SemanticRepresentation, ) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) symmetric_params = { "prompt": Prompt.from_text(text), "representation": SemanticRepresentation.Query, "compress_to_size": self.compress_to_size, "normalize": self.normalize, "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, } symmetric_request = SemanticEmbeddingRequest(**symmetric_params) symmetric_response = self.client.semantic_embed( request=symmetric_request, model=self.model ) return symmetric_response.embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-3
request=symmetric_request, model=self.model
)
return symmetric_response.embedding
[docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
"""The symmetric version of Aleph Alpha's semantic embeddings.
The main difference is that here, both the documents and
queries are embedded with a SemanticRepresentation.Symmetric
Example:
.. code-block:: python
from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding
embeddings = AlephAlphaSymmetricSemanticEmbedding()
text = "This is a test text"
doc_result = embeddings.embed_documents([text])
query_result = embeddings.embed_query(text)
"""
def _embed(self, text: str) -> List[float]:
try:
from aleph_alpha_client import (
Prompt,
SemanticEmbeddingRequest,
SemanticRepresentation,
)
except ImportError:
raise ValueError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
query_params = {
"prompt": Prompt.from_text(text),
"representation": SemanticRepresentation.Symmetric,
"compress_to_size": self.compress_to_size,
"normalize": self.normalize,
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
}
query_request = SemanticEmbeddingRequest(**query_params)
query_response = self.client.semantic_embed(
request=query_request, model=self.model
)
return query_response.embedding
[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Aleph Alpha's Document endpoint.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-4
"""Call out to Aleph Alpha's Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ document_embeddings = [] for text in texts: document_embeddings.append(self._embed(text)) return document_embeddings [docs] def embed_query(self, text: str) -> List[float]: """Call out to Aleph Alpha's asymmetric, query embedding endpoint Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embed(text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
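A hedged usage sketch for the asymmetric variant, assuming `ALEPH_ALPHA_API_KEY` is set and `aleph_alpha_client` is installed. With `compress_to_size=128`, both document and query vectors come back 128-dimensional:

.. code-block:: python

    from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding

    embeddings = AlephAlphaAsymmetricSemanticEmbedding(compress_to_size=128)
    doc_vectors = embeddings.embed_documents(["The quarterly report shows 12% growth."])
    query_vector = embeddings.embed_query("How much did revenue grow?")
    print(len(doc_vectors[0]), len(query_vector))  # 128 128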
e92bb8c22274-0
Source code for langchain.embeddings.octoai_embeddings """Module providing a wrapper around OctoAI Compute Service embedding models.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_EMBED_INSTRUCTION = "Represent this input: " DEFAULT_QUERY_INSTRUCTION = "Represent the question for retrieving similar documents: " [docs]class OctoAIEmbeddings(BaseModel, Embeddings): """Wrapper around OctoAI Compute Service embedding models. The environment variable ``OCTOAI_API_TOKEN`` should be set with your API token, or it can be passed as a named parameter to the constructor. """ endpoint_url: Optional[str] = Field(None, description="Endpoint URL to use.") model_kwargs: Optional[dict] = Field( None, description="Keyword arguments to pass to the model." ) octoai_api_token: Optional[str] = Field(None, description="OCTOAI API Token") embed_instruction: str = Field( DEFAULT_EMBED_INSTRUCTION, description="Instruction to use for embedding documents.", ) query_instruction: str = Field( DEFAULT_QUERY_INSTRUCTION, description="Instruction to use for embedding query." ) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator(allow_reuse=True) def validate_environment(cls, values: Dict) -> Dict: """Ensure that the API key and python package exist in environment.""" values["octoai_api_token"] = get_from_dict_or_env(
values["octoai_api_token"] = get_from_dict_or_env( values, "octoai_api_token", "OCTOAI_API_TOKEN" ) values["endpoint_url"] = get_from_dict_or_env( values, "endpoint_url", "ENDPOINT_URL" ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Return the identifying parameters.""" return { "endpoint_url": self.endpoint_url, "model_kwargs": self.model_kwargs or {}, } def _compute_embeddings( self, texts: List[str], instruction: str ) -> List[List[float]]: """Compute embeddings using an OctoAI instruct model.""" from octoai import client embeddings = [] octoai_client = client.Client(token=self.octoai_api_token) for text in texts: parameter_payload = { "sentence": str([text]), # for item in text]), "instruction": str([instruction]), # for item in text]), "parameters": self.model_kwargs or {}, } try: resp_json = octoai_client.infer(self.endpoint_url, parameter_payload) embedding = resp_json["embeddings"] except Exception as e: raise ValueError(f"Error raised by the inference endpoint: {e}") from e embeddings.append(embedding) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute document embeddings using an OctoAI instruct model.""" texts = list(map(lambda x: x.replace("\n", " "), texts)) return self._compute_embeddings(texts, self.embed_instruction)
[docs]    def embed_query(self, text: str) -> List[float]:
        """Compute a query embedding using an OctoAI instruct model."""
        text = text.replace("\n", " ")
        # Use the query instruction (not the document instruction) so the
        # query is embedded for retrieval against embedded documents.
        return self._compute_embeddings([text], self.query_instruction)[0]
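Tying the pieces together: the root validator resolves the token and endpoint from constructor arguments or the ``OCTOAI_API_TOKEN`` and ``ENDPOINT_URL`` environment variables, and every embedding call posts one sentence/instruction payload per text. A minimal usage sketch; the token and endpoint URL below are placeholders, not a real deployment:

.. code-block:: python

    import os

    from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings

    os.environ["OCTOAI_API_TOKEN"] = "<your-octoai-token>"  # placeholder token

    embeddings = OctoAIEmbeddings(
        endpoint_url="https://instruct.example.octoai.run/v1/predict",  # placeholder URL
    )

    doc_vectors = embeddings.embed_documents(["Alpha document.", "Beta document."])
    query_vector = embeddings.embed_query("Which document mentions alpha?")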
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/octoai_embeddings.html
Source code for langchain.embeddings.self_hosted
"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List

from pydantic import Extra

from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline


def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a loaded embedding pipeline and returns a list of
    embeddings, one for each document in the batch.
    """
    return pipeline(*args, **kwargs)


[docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
    """Runs custom embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example using a model load function:
        .. code-block:: python

            from langchain.embeddings import SelfHostedEmbeddings
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

            def get_pipeline():
                model_id = "facebook/bart-large"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                return pipeline("feature-extraction", model=model, tokenizer=tokenizer)

            embeddings = SelfHostedEmbeddings(
                model_load_fn=get_pipeline,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )

    Example passing in a pipeline path:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHFEmbeddings
            import pickle
            import runhouse as rh
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
            rh.blob(pickle.dumps(pipeline),
                    path="models/pipeline.pkl").save().to(gpu, path="models")
            embeddings = SelfHostedHFEmbeddings.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings on the remote hardware."""
    inference_kwargs: Any = None
    """Any kwargs to pass to the model's inference function."""

[docs]    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

[docs]    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client(self.pipeline_ref, texts)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
[docs]    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embeddings = self.client(self.pipeline_ref, text)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
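Both methods above funnel through ``self.client(self.pipeline_ref, ...)``, which runs ``_embed_documents`` (or a custom ``inference_fn``) against the pipeline loaded on the remote cluster, then coerces numpy output to plain lists. A sketch of the full round trip, assuming ``runhouse`` is installed and can reach a cluster; the cluster name and model choice are illustrative:

.. code-block:: python

    import runhouse as rh
    from transformers import AutoModel, AutoTokenizer, pipeline

    from langchain.embeddings import SelfHostedEmbeddings

    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # illustrative cluster

    def load_embedder():
        # Feature-extraction pipeline whose outputs serve as embeddings.
        model_id = "sentence-transformers/all-MiniLM-L6-v2"  # illustrative model
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModel.from_pretrained(model_id)
        return pipeline("feature-extraction", model=model, tokenizer=tokenizer)

    embeddings = SelfHostedEmbeddings(
        model_load_fn=load_embedder,
        hardware=gpu,
        model_reqs=["./", "torch", "transformers"],
    )

    vectors = embeddings.embed_documents(["first document", "second document"])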
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
Source code for langchain.embeddings.llamacpp
"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.embeddings.base import Embeddings


[docs]class LlamaCppEmbeddings(BaseModel, Embeddings):
    """Wrapper around llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number
    of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

[docs]    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

[docs]    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the llama-cpp-python library is installed."""
        model_path = values["model_path"]
        model_param_names = [
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(model_path, embedding=True, **model_params)
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        return values

[docs]    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

[docs]    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
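Once the validator has constructed the ``Llama`` client, both embedding methods are thin wrappers over ``client.embed``, with the ``float`` cast normalizing whatever numeric types llama.cpp returns. A short usage sketch; the model path is a placeholder for a local llama.cpp-compatible weights file:

.. code-block:: python

    from langchain.embeddings import LlamaCppEmbeddings

    llama = LlamaCppEmbeddings(
        model_path="/path/to/model.bin",  # placeholder: any llama.cpp-compatible file
        n_ctx=1024,     # widen the context window from the 512-token default
        n_threads=8,    # pin the thread count instead of auto-detecting
    )

    doc_vectors = llama.embed_documents(["llamas are large", "alpacas are smaller"])
    query_vector = llama.embed_query("How big is a llama?")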
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
Source code for langchain.embeddings.huggingface
"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)


[docs]class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': False}
            hf = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
"""Key word arguments to pass when calling the `encode` method of the model.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: import sentence_transformers except ImportError as exc: raise ImportError( "Could not import sentence_transformers python package. " "Please install it with `pip install sentence_transformers`." ) from exc self.client = sentence_transformers.SentenceTransformer( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client.encode(texts, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.client.encode(text, **self.encode_kwargs) return embedding.tolist() [docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
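The listing is truncated before ``HuggingFaceInstructEmbeddings`` is fully shown, but ``HuggingFaceEmbeddings`` above is complete. A minimal sketch of using it; the ``normalize_embeddings`` flag is passed straight through to ``SentenceTransformer.encode``:

.. code-block:: python

    from langchain.embeddings import HuggingFaceEmbeddings

    hf = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2",  # same as the default
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},  # unit-length vectors
    )

    doc_vectors = hf.embed_documents(["first document", "second document"])
    query_vector = hf.embed_query("which document comes first?")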