id
stringlengths
14
15
text
stringlengths
35
2.51k
source
stringlengths
61
154
72cd09940d58-4
# TODO: Handle async ) -> "OpenAPIEndpointChain": """Create an OpenAPIEndpoint from a spec at the specified url.""" operation = APIOperation.from_openapi_url(spec_url, path, method) return cls.from_api_operation( operation, requests=requests, llm=llm, ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
72cd09940d58-5
requests=_requests, param_mapping=param_mapping, verbose=verbose, return_intermediate_steps=return_intermediate_steps, callbacks=callbacks, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
cf0a0fd0f692-0
Source code for langchain.chains.api.openapi.requests_chain """request parser.""" import json import re from typing import Any from langchain.base_language import BaseLanguageModel from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt i...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/requests_chain.html
cf0a0fd0f692-1
) -> LLMChain: """Get the request parser.""" output_parser = APIRequesterOutputParser() prompt = PromptTemplate( template=REQUEST_TEMPLATE, output_parser=output_parser, partial_variables={"schema": typescript_definition}, input_variables=["instruct...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/requests_chain.html
b611e4631285-0
Source code for langchain.chains.api.openapi.response_chain """Response parser.""" import json import re from typing import Any from langchain.base_language import BaseLanguageModel from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/response_chain.html
b611e4631285-1
template=RESPONSE_TEMPLATE, output_parser=output_parser, input_variables=["response", "instructions"], ) return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/response_chain.html
cd17347bc766-0
Source code for langchain.chains.conversational_retrieval.base """Chain for chatting with a vector database.""" from __future__ import annotations import inspect import warnings from abc import abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union from pydantic imp...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-1
elif isinstance(dialogue_turn, tuple): human = "Human: " + dialogue_turn[0] ai = "Assistant: " + dialogue_turn[1] buffer += "\n" + "\n".join([human, ai]) else: raise ValueError( f"Unsupported chat history format: {type(dialogue_turn)}." ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-2
question: str, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-3
output["generated_question"] = new_question return output @abstractmethod async def _aget_docs( self, question: str, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun, ) -> List[Document]: """Get docs.""" async def _acall( ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-4
) output: Dict[str, Any] = {self.output_key: answer} if self.return_source_documents: output["source_documents"] = docs if self.return_generated_question: output["generated_question"] = new_question return output [docs] def save(self, file_path: Union[Path, str...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-5
"""Get docs.""" docs = self.retriever.get_relevant_documents( question, callbacks=run_manager.get_child() ) return self._reduce_tokens_below_limit(docs) async def _aget_docs( self, question: str, inputs: Dict[str, Any], *, run_manager: Asyn...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-6
verbose=verbose, callbacks=callbacks, ) return cls( retriever=retriever, combine_docs_chain=doc_chain, question_generator=condense_question_chain, callbacks=callbacks, **kwargs, ) [docs]class ChatVectorDBChain(BaseConversati...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
cd17347bc766-7
) -> List[Document]: """Get docs.""" raise NotImplementedError("ChatVectorDBChain does not support async") [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, vectorstore: VectorStore, condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROM...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
67ab029c6ec9-0
Source code for langchain.chains.pal.base """Implements Program-Aided Language Models. As in https://arxiv.org/pdf/2211.10435.pdf. """ from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLangua...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
67ab029c6ec9-1
if "llm" in values: warnings.warn( "Directly instantiating an PALChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the one of " "the class method constructors from_math_prompt, " "from_colored_object_prompt...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
67ab029c6ec9-2
output = {self.output_key: res.strip()} if self.return_intermediate_steps: output["intermediate_steps"] = code return output [docs] @classmethod def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain: """Load PAL from math prompt.""" llm_chain = L...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
2dec832ca7c5-0
Source code for langchain.chains.router.embedding_router from __future__ import annotations from typing import Any, Dict, List, Optional, Sequence, Tuple, Type from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.router.base import RouterChain from langchai...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/embedding_router.html
2dec832ca7c5-1
"""Convenience constructor.""" documents = [] for name, descriptions in names_and_descriptions: for description in descriptions: documents.append( Document(page_content=description, metadata={"name": name}) ) vectorstore = vectorsto...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/embedding_router.html
4e56959913bf-0
Source code for langchain.chains.router.multi_prompt """Use a single chain to route an input to one of multiple llm chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain fro...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
4e56959913bf-1
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(), ) router_chain = LLMRouterChain....
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
f38444c62c6e-0
Source code for langchain.chains.router.multi_retrieval_qa """Use a single chain to route an input to one of multiple retrieval qa chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import Conver...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
f38444c62c6e-1
default_retriever: Optional[BaseRetriever] = None, default_prompt: Optional[PromptTemplate] = None, default_chain: Optional[Chain] = None, **kwargs: Any, ) -> MultiRetrievalQAChain: if default_prompt and not default_retriever: raise ValueError( "`default_r...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
f38444c62c6e-2
prompt = PromptTemplate( template=prompt_template, input_variables=["history", "query"] ) _default_chain = ConversationChain( llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result" ) return cls( router_chain=router_...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
c4edbc7bd3cb-0
Source code for langchain.chains.router.base """Base classes for chain routing.""" from __future__ import annotations from abc import ABC from typing import Any, Dict, List, Mapping, NamedTuple, Optional from pydantic import Extra from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, Callba...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
c4edbc7bd3cb-1
silent_errors: bool = False """If True, use default_chain when an invalid destination name is provided. Defaults to False.""" [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
c4edbc7bd3cb-2
self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() route = await self.router_chain.aroute(inputs, ca...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
e808add899a0-0
Source code for langchain.chains.router.llm_router """Base classes for LLM-powered router chains.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type, cast from pydantic import root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager impo...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
e808add899a0-1
if not isinstance(outputs["next_inputs"], dict): raise ValueError def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
e808add899a0-2
next_inputs_inner_key: str = "input" [docs] def parse(self, text: str) -> Dict[str, Any]: try: expected_keys = ["destination", "next_inputs"] parsed = parse_and_check_json_markdown(text, expected_keys) if not isinstance(parsed["destination"], str): raise Va...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
0469f9a8fdb7-0
Source code for langchain.chains.llm_bash.prompt # flake8: noqa from __future__ import annotations import re from typing import List from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser, OutputParserException _PROMPT_TEMPLATE = """If someone asks you to perform a task, your ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/prompt.html
0469f9a8fdb7-1
for match in pattern.finditer(t): matched = match.group(1).strip() if matched: code_blocks.extend( [line for line in matched.split("\n") if line.strip()] ) return code_blocks @property def _type(self) -> str: return "bas...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/prompt.html
ec354e6e418c-0
Source code for langchain.chains.llm_bash.base """Chain that interprets a prompt and executes bash code to perform bash operations.""" from __future__ import annotations import logging import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_lang...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
ec354e6e418c-1
def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMBashChain with an llm is deprecated. " "Please instantiate with llm_chain or using the from_llm class method." ) if "llm_chain" n...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
ec354e6e418c-2
question=inputs[self.input_key], callbacks=_run_manager.get_child() ) _run_manager.on_text(t, color="green", verbose=self.verbose) t = t.strip() try: parser = self.llm_chain.prompt.output_parser command_list = parser.parse(t) # type: ignore[union-attr] ex...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
cf98a0af4101-0
Source code for langchain.chains.natbot.crawler # flake8: noqa import time from sys import platform from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, TypedDict, Union, ) if TYPE_CHECKING: from playwright.sync_api import Browser, CDPSession, ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-1
self.page_element_buffer: Dict[int, ElementInViewPort] self.client: CDPSession def go_to_page(self, url: str) -> None: self.page.goto(url=url if "://" in url else "http://" + url) self.client = self.page.context.new_cdp_session(self.page) self.page_element_buffer = {} def scroll(...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-2
self.page.keyboard.press("Enter") def crawl(self) -> List[str]: page = self.page page_element_buffer = self.page_element_buffer start = time.time() page_state_as_text = [] device_pixel_ratio: float = page.evaluate("window.devicePixelRatio") if platform == "darwin" and...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-3
document: Dict[str, Any] = tree["documents"][0] nodes: Dict[str, Any] = document["nodes"] backend_node_id: Dict[int, int] = nodes["backendNodeId"] attributes: Dict[int, Dict[int, Any]] = nodes["attributes"] node_value: Dict[int, int] = nodes["nodeValue"] parent: Dict[int, int] = ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-4
if node_name == "img": return "img" if ( node_name == "button" or has_click_handler ): # found pages that needed this quirk return "button" else: return "text" def find_attributes( attributes: Dict[i...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-5
elif ( is_parent_desc_anchor ): # reuse the parent's anchor_id (which could be much higher in the tree) value = (True, anchor_id) else: value = ( False, None, ) # not a descendant of an anch...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-6
and elem_lower_bound >= win_upper_bound ) if not partially_is_in_viewport: continue meta_data: List[str] = [] # inefficient to grab the same set of keys for kinds of objects, but it's fine for now element_attributes = find_attributes( ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-7
element_node_value = None if node_value[index] >= 0: element_node_value = strings[node_value[index]] if ( element_node_value == "|" ): # commonly used as a separator, does not add much context - lets save ourselves some token space ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-8
element_node_value = element.get("node_value") node_is_clickable = element.get("is_clickable") node_meta_data: Optional[List[str]] = element.get("node_meta") inner_text = f"{element_node_value} " if element_node_value else "" meta = "" if node_index in child_n...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
cf98a0af4101-9
) id_counter += 1 print("Parsing time: {:0.2f} seconds".format(time.time() - start)) return elements_of_interest
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/crawler.html
217de4f6fe1d-0
Source code for langchain.chains.natbot.base """Implement an LLM driven browser.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Cal...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
217de4f6fe1d-1
"Directly instantiating an NatBotChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
217de4f6fe1d-2
) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() url = inputs[self.input_url_key] browser_content = inputs[self.input_browser_content_key] llm_cmd = self.llm_chain.predict( objective=self.objective, url=url[:100], ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
37c4da09001c-0
Source code for langchain.chains.question_answering.__init__ """Load question answering chains.""" from typing import Any, Mapping, Optional, Protocol from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from lan...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-1
callbacks: Callbacks = None, **kwargs: Any, ) -> MapRerankDocumentsChain: llm_chain = LLMChain( llm=llm, prompt=prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) return MapRerankDocumentsChain( llm_chain=llm_chain, ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-2
combine_prompt: Optional[BasePromptTemplate] = None, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", collapse_prompt: Optional[BasePromptTemplate] = None, reduce_llm: Optional[BaseLanguageModel] = None, collapse_llm: Optional[BaseLanguageModel] ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-3
if collapse_llm is not None: raise ValueError( "collapse_llm provided, but collapse_prompt was not: please " "provide one or stop providing collapse_llm." ) else: _collapse_llm = collapse_llm or llm collapse_chain = StuffDocumentsChain( ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-4
) _refine_prompt = refine_prompt or refine_prompts.REFINE_PROMPT_SELECTOR.get_prompt( llm ) initial_chain = LLMChain( llm=llm, prompt=_question_prompt, verbose=verbose, callback_manager=callback_manager, callbacks=callbacks, ) _refine_llm = refine_llm ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
37c4da09001c-5
callback_manager: Callback manager to use for the chain. Returns: A chain to use for question answering. """ loader_mapping: Mapping[str, LoadingCallable] = { "stuff": _load_stuff_chain, "map_reduce": _load_map_reduce_chain, "refine": _load_refine_chain, "map_rerank":...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/question_answering/__init__.html
6638c626e013-0
Source code for langchain.chains.sql_database.base """Chain for interacting with SQL Database.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbac...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-1
return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the SQL table directly.""" use_query_checker: bool = False """Whether or not the query checker too...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-2
"""Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _call( self, inputs: Dict[str, Any], run_manager: Optional[...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-3
result = self.database.run(sql_cmd) intermediate_steps.append(str(result)) # output: sql exec else: query_checker_prompt = self.query_checker_prompt or PromptTemplate( template=QUERY_CHECKER, input_variables=["query", "dialect"] ) ...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-4
llm_inputs["input"] = input_text intermediate_steps.append(llm_inputs) # input: final answer final_result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() intermediate_steps.appe...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-5
2. Based on those tables, call the normal SQL database chain. This is useful in cases where the number of tables in the database is large. """ decider_chain: LLMChain sql_chain: SQLDatabaseChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
6638c626e013-6
def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _table_names = self.sql_chain.database.get_usable_table_names() table_na...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c73402a76270-0
Source code for langchain.chains.flare.prompts from typing import Tuple from langchain.prompts import PromptTemplate from langchain.schema import BaseOutputParser [docs]class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]): finished_value: str = "FINISHED" [docs] def parse(self, text: str) -> Tuple[str,...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/prompts.html
106956c91fd6-0
Source code for langchain.chains.flare.base from __future__ import annotations import re from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple import numpy as np from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager impor...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-1
) ) def _extract_tokens_and_log_probs( self, generations: List[Generation] ) -> Tuple[Sequence[str], Sequence[float]]: tokens = [] log_probs = [] for gen in generations: if gen.generation_info is None: raise ValueError tokens.extend(gen...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-2
[docs]class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain response_chain: _ResponseChain = Field(default_factory=_OpenAIResponseChain) output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser) retriever: BaseRetriever min_prob: float = 0.2 min_token_...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-3
question_gen_inputs = [ { "user_input": user_input, "current_response": initial_response, "uncertain_span": span, } for span in low_confidence_spans ] callbacks = _run_manager.get_child() question_gen_outputs = s...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
106956c91fd6-4
) initial_response = response.strip() + " " + "".join(tokens) if not low_confidence_spans: response = initial_response final_response, finished = self.output_parser.parse(response) if finished: return {self.output_keys[0]: final...
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
9062a4f7a33f-0
Source code for langchain.embeddings.jina """Wrapper around Jina embedding models.""" import os from typing import Any, Dict, List, Optional import requests from pydantic import BaseModel, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class JinaEm...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
9062a4f7a33f-1
headers={"Authorization": jina_auth_token}, ) if resp.status_code == 401: raise ValueError( "The given Jina auth token is invalid. " "Please check your Jina auth token." ) elif resp.status_code == 404: ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
9062a4f7a33f-2
Args: text: The text to embed. Returns: Embeddings for the text. """ from docarray import Document, DocumentArray embedding = self._post(docs=DocumentArray([Document(text=text)])).embeddings[0] return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/jina.html
23ea29a6e38b-0
Source code for langchain.embeddings.elasticsearch from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.utils import get_from_env if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.client import MlClient from langchain.embeddings.base impor...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-1
es_user: Optional[str] = None, es_password: Optional[str] = None, input_field: str = "text_field", ) -> ElasticsearchEmbeddings: """Instantiate embeddings from Elasticsearch credentials. Args: model_id (str): The model_id of the model deployed in the Elasticsearch ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-2
from elasticsearch.client import MlClient except ImportError: raise ImportError( "elasticsearch package not found, please install with 'pip install " "elasticsearch'" ) es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID") ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-3
Example: .. code-block:: python from elasticsearch import Elasticsearch from langchain.embeddings import ElasticsearchEmbeddings # Define the model ID and input field name (if different from default) model_id = "your_model_id" #...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
23ea29a6e38b-4
list. """ response = self.client.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) embeddings = [doc["predicted_value"] for doc in response["inference_results"]] return embeddings [docs] def embed_documents(self, texts...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
4f37bf8d3710-0
Source code for langchain.embeddings.google_palm """Wrapper around Google's PaLM Embeddings APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/google_palm.html
4f37bf8d3710-1
return embeddings.client.generate_embeddings(*args, **kwargs) return _embed_with_retry(*args, **kwargs) [docs]class GooglePalmEmbeddings(BaseModel, Embeddings): client: Any google_api_key: Optional[str] model_name: str = "models/embedding-gecko-001" """Model name to use.""" [docs] @root_validator...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/google_palm.html
6991ba944b41-0
Source code for langchain.embeddings.dashscope """Wrapper around DashScope embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Optional, ) from pydantic import BaseModel, Extra, root_validator from requests.exceptions import HTTPErro...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-1
elif resp.status_code in [400, 401]: raise ValueError( f"status_code: {resp.status_code} \n " f"code: {resp.code} \n message: {resp.message}" ) else: raise HTTPError( f"HTTP error occurred: status_code: {resp.status_code} \n " ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-2
max_retries: int = 5 [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: import dashscope """Validate that api key and python package exists in environment.""" ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
6991ba944b41-3
Returns: Embedding for the text. """ embedding = embed_with_retry( self, input=text, text_type="query", model=self.model )[0]["embedding"] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
5b4532fb538e-0
Source code for langchain.embeddings.embaas """Wrapper around embaas embeddings API.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import Embeddings from l...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
5b4532fb538e-1
api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @root_validator() def validate_environment(cls, values: Dict) -> Dict: ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
5b4532fb538e-2
return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
6e094b37f8b6-0
Source code for langchain.embeddings.aleph_alpha from typing import Any, Dict, List, Optional from pydantic import BaseModel, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings): """...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-1
"""Attention control parameters only apply to those tokens that have explicitly been set in the request.""" control_log_additive: Optional[bool] = True """Apply controls on prompt items by adding the log(control_factor) to attention scores.""" aleph_alpha_api_key: Optional[str] = None """API k...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-2
document_embeddings = [] for text in texts: document_params = { "prompt": Prompt.from_text(text), "representation": SemanticRepresentation.Document, "compress_to_size": self.compress_to_size, "normalize": self.normalize, ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-3
request=symmetric_request, model=self.model ) return symmetric_response.embedding [docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding): """The symmetric version of the Aleph Alpha's semantic embeddings. The main difference is that here, both the documents and ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
6e094b37f8b6-4
"""Call out to Aleph Alpha's Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ document_embeddings = [] for text in texts: document_embeddings.append(self._embed(text)) retur...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
e92bb8c22274-0
Source code for langchain.embeddings.octoai_embeddings """Module providing a wrapper around OctoAI Compute Service embedding models.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.embeddings.base import Embeddings from langchain.utils...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/octoai_embeddings.html
e92bb8c22274-1
values["octoai_api_token"] = get_from_dict_or_env( values, "octoai_api_token", "OCTOAI_API_TOKEN" ) values["endpoint_url"] = get_from_dict_or_env( values, "endpoint_url", "ENDPOINT_URL" ) return values @property def _identifying_params(self) -> Mapping[str...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/octoai_embeddings.html
e92bb8c22274-2
return self._compute_embeddings(texts, self.embed_instruction) [docs] def embed_query(self, text: str) -> List[float]: """Compute query embedding using an OctoAI instruct model.""" text = text.replace("\n", " ") return self._compute_embeddings([text], self.embed_instruction)[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/octoai_embeddings.html
00d11116242b-0
Source code for langchain.embeddings.self_hosted """Running custom embedding models on self-hosted remote hardware.""" from typing import Any, Callable, List from pydantic import Extra from langchain.embeddings.base import Embeddings from langchain.llms import SelfHostedPipeline def _embed_documents(pipeline: Any, *arg...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
00d11116242b-1
model_load_fn=get_pipeline, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing in a pipeline path: .. code-block:: python from langchain.embeddings import SelfHostedHFEmbeddings import runhouse as rh from...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
00d11116242b-2
if not isinstance(embeddings, list): return embeddings.tolist() return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embe...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
b45d4b47150d-0
Source code for langchain.embeddings.llamacpp """Wrapper around llama.cpp embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.embeddings.base import Embeddings [docs]class LlamaCppEmbeddings(BaseModel, Embeddings): """Wrapper ...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
b45d4b47150d-1
use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
b45d4b47150d-2
raise ModuleNotFoundError( "Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this embedding model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
dfe36cf2d9a1-0
Source code for langchain.embeddings.huggingface """Wrapper around HuggingFace embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field from langchain.embeddings.base import Embeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_M...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
dfe36cf2d9a1-1
"""Key word arguments to pass when calling the `encode` method of the model.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: import sentence_transformers except ImportError as exc: raise ImportEr...
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html