Dataset columns:
id      string (length 14 to 16)
text    string (length 29 to 2.73k)
source  string (length 49 to 117)
d04aca0fd0ca-1
"""Whether or not to use sampling; use greedy decoding otherwise.""" max_length: Optional[int] = None """The maximum length of the sequence to be generated.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""...
https://python.langchain.com/en/latest/_modules/langchain/llms/petals.html
d04aca0fd0ca-2
from petals import DistributedBloomForCausalLM
from transformers import BloomTokenizerFast

model_name = values["model_name"]
values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name)
values["client"] = DistributedBloomForCausalLM.from_pretrained(model_name)
...
https://python.langchain.com/en/latest/_modules/langchain/llms/petals.html
d04aca0fd0ca-3
"""Call the Petals API.""" params = self._default_params inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"] outputs = self.client.generate(inputs, **params) text = self.tokenizer.decode(outputs[0]) if stop is not None: # I believe this is required since...
https://python.langchain.com/en/latest/_modules/langchain/llms/petals.html
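The three petals.html chunks above cover the wrapper's sampling parameters, the validator that instantiates the tokenizer and distributed client, and the `_call` method. A minimal usage sketch, assuming the `petals` package is installed and a Hugging Face API key is set in the environment; the model name is illustrative:

from langchain.llms import Petals

# Constructing the wrapper loads the tokenizer and joins the public swarm.
llm = Petals(model_name="bigscience/bloom-petals")

# __call__ tokenizes the prompt, runs client.generate, and decodes the result.
print(llm("What is a good name for a company that makes colorful socks?"))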
8a77fb1c3855-0
Source code for langchain.experimental.autonomous_agents.autogpt.agent

from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.au...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
8a77fb1c3855-1
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
) -> AutoGPT:
    prompt = AutoGPTPrompt(
        ai_name=ai_name,
        ai_role=ai_r...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
8a77fb1c3855-2
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
    return action.args["response"]
if action.name in tools:
    tool = tools[action.name]
    ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
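These agent.html chunks show `AutoGPT.from_llm_and_tools` and the run loop that parses the assistant reply into a command and dispatches to the matching tool. A hedged wiring sketch; `tools` and `retriever` are placeholders assumed to be built elsewhere:

from langchain.chat_models import ChatOpenAI
from langchain.experimental import AutoGPT

# `tools` is a List[BaseTool] and `retriever` a VectorStoreRetriever,
# both placeholders constructed elsewhere.
agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    tools=tools,
    llm=ChatOpenAI(temperature=0),
    memory=retriever,
)
agent.run(["Write a weather report for SF today"])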
fd27ab7661c6-0
Source code for langchain.experimental.autonomous_agents.baby_agi.baby_agi

"""BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerFo...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
fd27ab7661c6-1
print(str(t["task_id"]) + ": " + t["task_name"]) def print_next_task(self, task: Dict) -> None: print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") print(str(task["task_id"]) + ": " + task["task_name"]) def print_task_result(self, result: str) -> None: print("\033[93m...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
fd27ab7661c6-2
next_task_id = int(this_task_id) + 1
response = self.task_prioritization_chain.run(
    task_names=", ".join(task_names),
    next_task_id=str(next_task_id),
    objective=objective,
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_st...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
fd27ab7661c6-3
"""Run the agent.""" objective = inputs["objective"] first_task = inputs.get("first_task", "Make a todo list") self.add_task({"task_id": 1, "task_name": first_task}) num_iters = 0 while True: if self.task_list: self.print_task_list() # ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
fd27ab7661c6-4
return {}

@classmethod
def from_llm(
    cls,
    llm: BaseLanguageModel,
    vectorstore: VectorStore,
    verbose: bool = False,
    task_execution_chain: Optional[Chain] = None,
    **kwargs: Dict[str, Any],
) -> "BabyAGI":
    """Initialize the BabyAGI Controller."""
    ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
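The baby_agi.html chunks cover task printing, task reprioritization, the main while-loop, and the `from_llm` constructor. A short usage sketch, assuming a `vectorstore` for task results already exists:

from langchain import OpenAI
from langchain.experimental import BabyAGI

# `vectorstore` is a placeholder VectorStore used to store task results.
baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0), vectorstore=vectorstore, verbose=True
)
baby_agi({"objective": "Write a weather report for SF today"})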
db2e2cea29a6-0
Source code for langchain.experimental.generative_agents.memory

import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional

from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.prompts import PromptTemplate
from langchain.retrievers ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-1
# output keys
relevant_memories_key: str = "relevant_memories"
relevant_memories_simple_key: str = "relevant_memories_simple"
most_recent_memories_key: str = "most_recent_memories"
now_key: str = "now"
reflecting: bool = False

def chain(self, prompt: PromptTemplate) -> LLMChain:
    return L...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-2
) -> List[str]:
    """Generate 'insights' on a topic of reflection, based on pertinent memories."""
    prompt = PromptTemplate.from_template(
        "Statements about {topic}\n"
        + "{related_statements}\n\n"
        + "What 5 high-level insights can you infer from the above statements?"
        ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-3
"On the scale of 1 to 10, where 1 is purely mundane" + " (e.g., brushing teeth, making bed) and 10 is" + " extremely poignant (e.g., a break up, college" + " acceptance), rate the likely poignancy of the" + " following piece of memory. Respond with a single integer." ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-4
    and not self.reflecting
):
    self.reflecting = True
    self.pause_to_reflect(now=now)
    # Hack to clear the importance from reflection
    self.aggregate_importance = 0.0
    self.reflecting = False
return result

def fetch_memories(
    self, ob...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-5
        break
    consumed_tokens += self.llm.get_num_tokens(doc.page_content)
    if consumed_tokens < self.max_tokens_limit:
        result.append(doc)
return self.format_memories_simple(result)

@property
def memory_variables(self) -> List[str]:
    """Input keys this memory class ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
db2e2cea29a6-6
def clear(self) -> None:
    """Clear memory contents."""
    # TODO
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html
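The memory.html chunks describe GenerativeAgentMemory: its output keys, insight generation, importance scoring, and the reflection trigger. A sketch of constructing it, assuming a time-weighted `retriever` built elsewhere:

from langchain.chat_models import ChatOpenAI
from langchain.experimental.generative_agents import GenerativeAgentMemory

# `retriever` is a placeholder TimeWeightedVectorStoreRetriever.
memory = GenerativeAgentMemory(
    llm=ChatOpenAI(),
    memory_retriever=retriever,
    reflection_threshold=8,  # reflect once aggregate importance crosses this
)
memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")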
5c718024d5f5-0
Source code for langchain.experimental.generative_agents.generative_agent

import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.experimental.gen...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-1
arbitrary_types_allowed = True

# LLM-related methods
@staticmethod
def _parse_list(text: str) -> List[str]:
    """Parse a newline-separated string into a list of strings."""
    lines = re.split(r"\n", text.strip())
    return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

de...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-2
entity_action = self._get_entity_action(observation, entity_name)
q1 = f"What is the relationship between {self.name} and {entity_name}"
q2 = f"{entity_name} is {entity_action}"
return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()

def _generate_reaction(
    self, observ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-3
)
consumed_tokens = self.llm.get_num_tokens(
    prompt.format(most_recent_memories="", **kwargs)
)
kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
return self.chain(prompt=prompt).run(**kwargs).strip()

def _clean_response(self, text: str) -> str:
    ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-4
if "SAY:" in result: said_value = self._clean_response(result.split("SAY:")[-1]) return True, f"{self.name} said {said_value}" else: return False, result [docs] def generate_dialogue_response( self, observation: str, now: Optional[datetime] = None ) -> Tuple[bo...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-5
    )
    return True, f"{self.name} said {response_text}"
else:
    return False, result

######################################################
# Agent stateful summary methods.                    #
# Each dialog or response prompt includes a header   #
# summarizing the agent's sel...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
5c718024d5f5-6
+ f"\nInnate traits: {self.traits}" + f"\n{self.summary}" ) [docs] def get_full_header( self, force_refresh: bool = False, now: Optional[datetime] = None ) -> str: """Return a full header of the agent's status, summary, and current time.""" now = datetime.now() if now ...
https://python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html
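These generative_agent.html chunks show reaction and dialogue generation plus the agent's cached summary header. A sketch of putting the pieces together; `agent_memory` is the GenerativeAgentMemory from the previous sketch:

from langchain.chat_models import ChatOpenAI
from langchain.experimental.generative_agents import GenerativeAgent

tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    llm=ChatOpenAI(),
    memory=agent_memory,  # placeholder GenerativeAgentMemory
)
# Returns (should_continue_dialogue, reaction_text).
_, reaction = tommie.generate_reaction("Tommie sees a new coffee shop open up")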
e16cbe7d80b8-0
Source code for langchain.chains.mapreduce

"""Map-reduce chain.

Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Extra

from langchain.base_languag...
https://python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
e16cbe7d80b8-1
reduce_chain = StuffDocumentsChain(llm_chain=llm_chain, callbacks=callbacks)
combine_documents_chain = MapReduceDocumentsChain(
    llm_chain=llm_chain,
    combine_document_chain=reduce_chain,
    callbacks=callbacks,
)
return cls(
    combine_documents_chain=com...
https://python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
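The mapreduce.html chunks show how the constructor stacks a StuffDocumentsChain inside a MapReduceDocumentsChain. A usage sketch via `from_params`; the prompt wording and `long_text` input are illustrative:

from langchain import OpenAI, PromptTemplate
from langchain.chains.mapreduce import MapReduceChain
from langchain.text_splitter import CharacterTextSplitter

prompt = PromptTemplate(
    input_variables=["text"],
    template="Write a concise summary of the following:\n\n{text}",
)
chain = MapReduceChain.from_params(
    llm=OpenAI(temperature=0),
    prompt=prompt,
    text_splitter=CharacterTextSplitter(),
)
# The document is split, each part is summarized with the prompt above,
# and the partial summaries are combined by the reduce step.
summary = chain.run(long_text)  # `long_text` is a placeholder string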
3299baebe475-0
Source code for langchain.chains.transform

"""Chain that runs an arbitrary python function."""
from typing import Callable, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain

class TransformChain(Chain):
    """Chain transform chain outp...
https://python.langchain.com/en/latest/_modules/langchain/chains/transform.html
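TransformChain wraps an arbitrary python function as a chain step, as the transform.html chunk describes. A self-contained sketch; the function and key names are illustrative:

from langchain.chains import TransformChain

def shorten(inputs: dict) -> dict:
    # Keep only the first two paragraphs of the input text.
    text = inputs["text"]
    return {"output_text": "\n\n".join(text.split("\n\n")[:2])}

transform_chain = TransformChain(
    input_variables=["text"],
    output_variables=["output_text"],
    transform=shorten,
)
print(transform_chain.run("one\n\ntwo\n\nthree"))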
b5fe4cebc137-0
Source code for langchain.chains.moderation

"""Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.utils import get_from_dic...
https://python.langchain.com/en/latest/_modules/langchain/chains/moderation.html
b5fe4cebc137-1
values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization values["client"] = ...
https://python.langchain.com/en/latest/_modules/langchain/chains/moderation.html
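The moderation.html chunks show the validator resolving the OpenAI key and organization before storing the moderation client. A minimal sketch, assuming OPENAI_API_KEY is set in the environment:

from langchain.chains import OpenAIModerationChain

moderation_chain = OpenAIModerationChain()
print(moderation_chain.run("This is fine"))

# With error=True the chain raises on flagged input instead of
# returning the canned violation text.
strict_chain = OpenAIModerationChain(error=True)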
bdef456eeb70-0
Source code for langchain.chains.loading

"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union

import yaml

from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocume...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-1
if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = con...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-2
)

def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_p...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-3
if not isinstance(llm_chain, LLMChain):
    raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
    combine_document_chain_config = config.pop("combine_document_chain")
    combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif ...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-4
# llm attribute is deprecated in favor of llm_chain, here to support old configs
elif "llm" in config:
    llm_config = config.pop("llm")
    llm = load_llm_from_config(llm_config)
# llm_path attribute is deprecated in favor of llm_chain_path,
# it's here to support old configs
elif "llm_path" in conf...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-5
        create_draft_answer_prompt_config
    )
elif "create_draft_answer_prompt_path" in config:
    create_draft_answer_prompt = load_prompt(
        config.pop("create_draft_answer_prompt_path")
    )
if "list_assertions_prompt" in config:
    list_assertions_prompt_config = config.pop("list_asse...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-6
llm_chain = None
if "llm_chain" in config:
    llm_chain_config = config.pop("llm_chain")
    llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
    llm_chain = load_chain(config.pop("llm_chain_path"))
# llm attribute is deprecated in favor of llm_chain, here t...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-7
elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: ...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-8
if llm_chain:
    return PALChain(llm_chain=llm_chain, prompt=prompt, **config)
else:
    return PALChain(llm=llm, prompt=prompt, **config)

def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    if "initial_llm_chain" in config:
        initial_llm_chain_config = config.p...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-9
    refine_llm_chain=refine_llm_chain,
    document_prompt=document_prompt,
    **config,
)

def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combi...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-10
    config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_docum...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-11
    vectorstore=vectorstore,
    **config,
)

def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
    if "api_request_chain" in config:
        api_request_chain_config = config.pop("api_request_chain")
        api_request_chain = load_chain_from_config(api_request_chain_config)
    elif "api_request_chain...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-12
elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( ...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-13
}

def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
    """Load chain from Config Dict."""
    if "_type" not in config:
        raise ValueError("Must specify a chain Type in config")
    config_type = config.pop("_type")
    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading...
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
bdef456eeb70-14
config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 02, 2023.
https://python.langchain.com/en/latest/_modules/langchain/chains/loading.html
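Together, the loading.html chunks implement a registry of `_load_*` functions keyed by the `_type` field of a saved config. A sketch of the round trip; the file name is illustrative:

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains import load_chain

chain = LLMChain(
    llm=OpenAI(), prompt=PromptTemplate.from_template("Tell me a {adjective} joke.")
)
chain.save("llm_chain.json")  # serializes the config, including `_type`

# load_chain reads the file and dispatches on `_type` via type_to_loader_dict.
loaded = load_chain("llm_chain.json")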
4fbf4a02103b-0
Source code for langchain.chains.llm_requests

"""Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForChainRun
from langc...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_requests.html
4fbf4a02103b-1
    :meta private:
    """
    return [self.output_key]

@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that api key and python package exists in environment."""
    try:
        from bs4 import BeautifulSoup  # noqa: F401
    except ImportError:
        ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_requests.html
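The llm_requests.html chunks show a chain that fetches a URL (requiring beautifulsoup4 per the validator) and feeds the page text to an inner LLMChain. A hedged sketch; the prompt wording and URL are illustrative:

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains import LLMRequestsChain

template = """Between >>> and <<< is the raw text of a web page.
Answer the question '{query}' from it, or reply "not found".
>>> {requests_result} <<<"""
prompt = PromptTemplate(
    input_variables=["query", "requests_result"], template=template
)

chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt))
chain({"query": "What is the capital of France?",
       "url": "https://en.wikipedia.org/wiki/France"})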
b6a3cc115d2f-0
Source code for langchain.chains.llm

"""Chain that just formats a prompt and calls an LLM."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from pydantic import Extra

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-1
def output_keys(self) -> List[str]:
    """Will always return text key.

    :meta private:
    """
    return [self.output_key]

def _call(
    self,
    inputs: Dict[str, Any],
    run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
    response = self....
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-2
"""Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**se...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-3
    await run_manager.on_text(_text, end="\n", verbose=self.verbose)
    if "stop" in inputs and inputs["stop"] != stop:
        raise ValueError(
            "If `stop` is present in any inputs, should be present in all."
        )
    prompts.append(prompt)
return prompts, ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-4
except (KeyboardInterrupt, Exception) as e:
    await run_manager.on_chain_error(e)
    raise e
outputs = self.create_outputs(response)
await run_manager.on_chain_end({"outputs": outputs})
return outputs

def create_outputs(self, response: LLMResult) -> List[Dict[str, st...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-5
Returns:
    Completion from LLM.

Example:
    .. code-block:: python

        completion = llm.predict(adjective="funny")
"""
return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

def predict_and_parse(
    self, callbacks: Callbacks = None...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
b6a3cc115d2f-6
if self.prompt.output_parser is not None:
    return [
        self.prompt.output_parser.parse(res[self.output_key])
        for res in result
    ]
else:
    return result

async def aapply_and_parse(
    self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm.html
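The llm.html chunks cover prompt preparation, stop-sequence validation, output creation, and the predict/parse helpers. A minimal usage sketch:

from langchain import LLMChain, OpenAI, PromptTemplate

prompt = PromptTemplate(
    input_variables=["adjective"],
    template="Tell me a {adjective} joke.",
)
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

# predict() formats the prompt from keyword arguments and returns the text key.
completion = chain.predict(adjective="funny")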
1869b5d332c9-0
Source code for langchain.chains.sequential

"""Chain pipeline where the outputs of one step feed directly into next."""
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
...
https://python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
1869b5d332c9-1
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
    f"The input key(s) {''.join(overlapping_keys)} are found "
    f"in the Memory keys ({memory_keys}) - please use input and "
    f"memory keys that don't overlap."
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
1869b5d332c9-2
for i, chain in enumerate(self.chains):
    callbacks = _run_manager.get_child()
    outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
    known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}

async def _acall(
    self...
https://python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
1869b5d332c9-3
""" return [self.output_key] @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that chains are all single input/output.""" for chain in values["chains"]: if len(chain.input_keys) != 1: raise ValueError( "Chains u...
https://python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
1869b5d332c9-4
) -> Dict[str, Any]:
    _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    _input = inputs[self.input_key]
    color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
    for i, chain in enumerate(self.c...
https://python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
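The sequential.html chunks validate that input, output, and memory keys do not overlap and thread each step's outputs into the next step's inputs. A sketch with the single-input/single-output variant those validators describe:

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains import SimpleSequentialChain

llm = OpenAI(temperature=0.7)
name_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(
        "What is a good name for a company that makes {product}?"
    ),
)
slogan_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Write a slogan for the company {company}."),
)

# Each chain must be single input/output (enforced by validate_chains);
# the output of one step feeds directly into the next.
overall = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
overall.run("colorful socks")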
3edb00401123-0
Source code for langchain.chains.llm_checker.base

"""Chain for question-answering with self-verification."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.cal...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
3edb00401123-1
)
chains = [
    create_draft_answer_chain,
    list_assertions_chain,
    check_assertions_chain,
    revised_answer_chain,
]
question_to_checked_assertions_chain = SequentialChain(
    chains=chains,
    input_variables=["question"],
    output_variables=["revised_statement"],
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
3edb00401123-2
if "llm" in values: warnings.warn( "Directly instantiating an LLMCheckerChain with an llm is deprecated. " "Please instantiate with question_to_checked_assertions_chain " "or using the from_llm class method." ) if ( "que...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
3edb00401123-3
output = self.question_to_checked_assertions_chain(
    {"question": question}, callbacks=_run_manager.get_child()
)
return {self.output_key: output["revised_statement"]}

@property
def _chain_type(self) -> str:
    return "llm_checker_chain"

@classmethod
def from_llm(
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
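These llm_checker chunks assemble a four-step SequentialChain (draft answer, list assertions, check assertions, revise) and deprecate direct `llm` construction in favor of `from_llm`. A usage sketch:

from langchain import OpenAI
from langchain.chains import LLMCheckerChain

checker_chain = LLMCheckerChain.from_llm(OpenAI(temperature=0.7), verbose=True)
checker_chain.run("What type of mammal lays the biggest eggs?")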
c5565a3a143a-0
Source code for langchain.chains.sql_database.base

"""Chain for interacting with SQL Database."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbac...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-1
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker too...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-2
    :meta private:
    """
    if not self.return_intermediate_steps:
        return [self.output_key]
    else:
        return [self.output_key, INTERMEDIATE_STEPS_KEY]

def _call(
    self,
    inputs: Dict[str, Any],
    run_manager: Optional[CallbackManagerForChainRun] = None,
) -...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-3
    result = self.database.run(sql_cmd)
    intermediate_steps.append(str(result))  # output: sql exec
else:
    query_checker_prompt = self.query_checker_prompt or PromptTemplate(
        template=QUERY_CHECKER, input_variables=["query", "dialect"]
    )
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-4
llm_inputs["input"] = input_text intermediate_steps.append(llm_inputs) # input: final answer final_result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() intermediate_steps.appe...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-5
2. Based on those tables, call the normal SQL database chain.

This is useful in cases where the number of tables in the database is large.
"""

decider_chain: LLMChain
sql_chain: SQLDatabaseChain
input_key: str = "query"  #: :meta private:
output_key: str = "result"  #: :meta private:
return_...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
c5565a3a143a-6
def _call(
    self,
    inputs: Dict[str, Any],
    run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    _table_names = self.sql_chain.database.get_usable_table_names()
    table_na...
https://python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
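The sql_database chunks cover the main chain (optional query checker, intermediate steps) and the sequential variant that first decides which tables to use. A sketch, assuming a local SQLite file; the URI and question are illustrative:

from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # any SQLAlchemy URI works
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
db_chain.run("How many employees are there?")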
cf8a9c153d17-0
Source code for langchain.chains.constitutional_ai.base

"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List, Optional

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain...
https://python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
cf8a9c153d17-1
critique_chain: LLMChain
revision_chain: LLMChain
return_intermediate_steps: bool = False

@classmethod
def get_principles(
    cls, names: Optional[List[str]] = None
) -> List[ConstitutionalPrinciple]:
    if names is None:
        return list(PRINCIPLES.values())
    else:
        ...
https://python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
cf8a9c153d17-2
) -> Dict[str, Any]:
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    response = self.chain.run(
        **inputs,
        callbacks=_run_manager.get_child(),
    )
    initial_response = response
    input_prompt = self.chain.prompt.format(**inputs)
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
cf8a9c153d17-3
_run_manager.on_text(
    text=f"Applying {constitutional_principle.name}..." + "\n\n",
    verbose=self.verbose,
    color="green",
)
_run_manager.on_text(
    text="Critique: " + critique + "\n\n",
    verbose=self.verbose,
    ...
https://python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
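The constitutional_ai chunks run a base chain, then apply a critique and a revision per principle. A sketch using the built-in registry exposed by `get_principles`; the principle name, prompt, and question are illustrative:

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains import ConstitutionalChain

llm = OpenAI(temperature=0)
qa_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Question: {question}\nAnswer:"),
)

principles = ConstitutionalChain.get_principles(["illegal"])
constitutional_chain = ConstitutionalChain.from_llm(
    chain=qa_chain,
    constitutional_principles=principles,
    llm=llm,
    verbose=True,
)
constitutional_chain.run(question="What is the fastest way to get rich?")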
e411ab45e193-0
Source code for langchain.chains.conversational_retrieval.base

"""Chain for chatting with a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import Extra, Fiel...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-1
human = "Human: " + dialogue_turn[0] ai = "Assistant: " + dialogue_turn[1] buffer += "\n" + "\n".join([human, ai]) else: raise ValueError( f"Unsupported chat history format: {type(dialogue_turn)}." f" Full chat history: {chat_history} " ...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-2
) -> Dict[str, Any]:
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    question = inputs["question"]
    get_chat_history = self.get_chat_history or _get_chat_history
    chat_history_str = get_chat_history(inputs["chat_history"])
    if chat_history_str:
        ...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-3
    new_question = await self.question_generator.arun(
        question=question, chat_history=chat_history_str, callbacks=callbacks
    )
else:
    new_question = question
docs = await self._aget_docs(new_question, inputs)
new_inputs = inputs.copy()
new_inputs["quest...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-4
    while token_count > self.max_tokens_limit:
        num_docs -= 1
        token_count -= tokens[num_docs]
    return docs[:num_docs]

def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
    docs = self.retriever.get_relevant_documents(question)
    return self._re...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-5
    combine_docs_chain=doc_chain,
    question_generator=condense_question_chain,
    **kwargs,
)

class ChatVectorDBChain(BaseConversationalRetrievalChain):
    """Chain for chatting with a vector database."""

    vectorstore: VectorStore = Field(alias="vectorstore")
    top_k_docs_for_context...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
e411ab45e193-6
    **kwargs: Any,
) -> BaseConversationalRetrievalChain:
    """Load chain from LLM."""
    combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
    doc_chain = load_qa_chain(
        llm,
        chain_type=chain_type,
        **combine_docs_chain_kwargs,
    )
    condense_ques...
https://python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
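The conversational_retrieval chunks condense the chat history into a standalone question, fetch documents, and combine them into an answer. A sketch; `retriever` is a placeholder such as `vectorstore.as_retriever()`:

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI

qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), retriever)
chat_history = []
result = qa({"question": "Who is the author?", "chat_history": chat_history})
chat_history.append(("Who is the author?", result["answer"]))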
af9548bb47a6-0
Source code for langchain.chains.api.base

"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.ca...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
af9548bb47a6-1
if set(input_vars) != expected_vars:
    raise ValueError(
        f"Input variables should be {expected_vars}, got {input_vars}"
    )
return values

@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) -> Dict:
    """Check that api answer prompt expec...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
af9548bb47a6-2
async def _acall(
    self,
    inputs: Dict[str, Any],
    run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
    _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
    question = inputs[self.question_key]
    api_url = await se...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
af9548bb47a6-3
requests_wrapper = TextRequestsWrapper(headers=headers)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
    api_request_chain=get_request_chain,
    api_answer_chain=get_answer_chain,
    requests_wrapper=requests_wrapper,
    api_docs=api_doc...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
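The api/base.html chunks validate the two prompts and chain a URL-construction call with an answer-summarization call. A sketch using the bundled Open-Meteo docs:

from langchain import OpenAI
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs

chain = APIChain.from_llm_and_api_docs(
    OpenAI(temperature=0),
    open_meteo_docs.OPEN_METEO_DOCS,
    verbose=True,
)
chain.run("What is the temperature in Munich right now in degrees Celsius?")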
991f51fa6853-0
Source code for langchain.chains.api.openapi.chain

"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations

import json
from typing import Any, Dict, List, NamedTuple, Optional, cast

from pydantic import BaseModel, Field
from requests import Response

from la...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
991f51fa6853-1
""" return [self.instructions_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, "intermediate_steps"] ...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
991f51fa6853-2
path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {
    "url": path,
    "data": body_params,
    "params": query_params,
}

def _get_output(self, output: str, intermediate_ste...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
991f51fa6853-3
method = getattr(self.requests, self.api_operation.method.value)
api_response: Response = method(**request_args)
if api_response.status_code != 200:
    method_str = str(self.api_operation.method.value)
    response_text = (
        f"{api_response.status_code...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
991f51fa6853-4
    # TODO: Handle async
) -> "OpenAPIEndpointChain":
    """Create an OpenAPIEndpoint from a spec at the specified url."""
    operation = APIOperation.from_openapi_url(spec_url, path, method)
    return cls.from_api_operation(
        operation,
        requests=requests,
        llm=llm,
        ...
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
991f51fa6853-5
    requests=_requests,
    param_mapping=param_mapping,
    verbose=verbose,
    return_intermediate_steps=return_intermediate_steps,
    callbacks=callbacks,
    **kwargs,
)
https://python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
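The openapi/chain.html chunks build request args from parsed instructions, call the endpoint, and surface non-200 responses. A sketch of the `from_url_and_method` constructor shown above; the spec URL, path, and question are placeholders:

from langchain import OpenAI
from langchain.chains import OpenAPIEndpointChain
from langchain.requests import Requests

chain = OpenAPIEndpointChain.from_url_and_method(
    "https://example.com/openapi.json",  # placeholder spec URL
    "/public/products",                    # placeholder path
    "get",
    llm=OpenAI(),
    requests=Requests(),
)
chain.run("What are the three cheapest products?")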
c58bb0e82b01-0
Source code for langchain.chains.llm_math.base

"""Chain that interprets a prompt and executes python code to do math."""
from __future__ import annotations

import math
import re
import warnings
from typing import Any, Dict, List, Optional

import numexpr
from pydantic import Extra, root_validator

from langchain.base_lan...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
c58bb0e82b01-1
if "llm" in values: warnings.warn( "Directly instantiating an LLMMathChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"]...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
c58bb0e82b01-2
) -> Dict[str, str]:
    run_manager.on_text(llm_output, color="green", verbose=self.verbose)
    llm_output = llm_output.strip()
    text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
    if text_match:
        expression = text_match.group(1)
        output = self._evaluate_exp...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
c58bb0e82b01-3
elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} def _call( ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
c58bb0e82b01-4
@classmethod
def from_llm(
    cls,
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate = PROMPT,
    **kwargs: Any,
) -> LLMMathChain:
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    return cls(llm_chain=llm_chain, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
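The llm_math chunks parse a fenced text block out of the LLM output and evaluate it with numexpr. A minimal sketch:

from langchain import OpenAI
from langchain.chains import LLMMathChain

# from_llm (shown above) wraps the LLM in an LLMChain with the math prompt.
llm_math = LLMMathChain.from_llm(OpenAI(temperature=0), verbose=True)
llm_math.run("What is 13 raised to the .3432 power?")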
0e552595d0f1-0
Source code for langchain.chains.llm_summarization_checker.base

"""Chain for summarization with self-verification."""
from __future__ import annotations

import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import Ba...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
0e552595d0f1-1
        verbose=verbose,
    ),
    LLMChain(
        llm=llm,
        prompt=check_assertions_prompt,
        output_key="checked_assertions",
        verbose=verbose,
    ),
    LLMChain(
        llm=llm,
        prompt=revised_summary_prompt,
        ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
0e552595d0f1-2
input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: max_checks: int = 2 """Maximum number of times to check the assertions. Default to double-checking.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitr...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
0e552595d0f1-3
def _call(
    self,
    inputs: Dict[str, Any],
    run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    all_true = False
    count = 0
    output = None
    original_input ...
https://python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
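These llm_summarization_checker chunks build the create/check/revise LLMChain sequence and loop it up to `max_checks` times. A hedged sketch, assuming the module's `from_llm` constructor mirrors the deprecation pattern used by the other checker chains; the input text is illustrative:

from langchain import OpenAI
from langchain.chains import LLMSummarizationCheckerChain

checker_chain = LLMSummarizationCheckerChain.from_llm(
    OpenAI(temperature=0), verbose=True, max_checks=2
)
checker_chain.run("Mammals can lay eggs; the platypus is one example.")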