id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
04835fe85928-3 | f"a description must always be provided."
)
super()._validate_tools(tools)
[docs]class MRKLChain(AgentExecutor):
"""Chain that implements the MRKL system.
Example:
.. code-block:: python
from langchain import OpenAI, MRKLChain
from langchain.chains.mrkl.ba... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html |
04835fe85928-4 | action_description="useful for searching"
),
ChainConfig(
action_name="Calculator",
action=llm_math_chain.run,
action_description="useful for doing math"
)
]
... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html |
34ca081a3eeb-0 | Source code for langchain.agents.openai_functions_multi_agent.base
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import root_valida... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-1 | return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-2 | except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
final_tools: List[AgentAction] = []
for tool_schema in tools:
_tool_input = tool_schema... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-3 | that supports using `functions`.
tools: The tools this agent has access to.
prompt: The prompt for this agent, should support agent_scratchpad as one
of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLan... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-4 | # a new tool that has one argument which is a list of tools
# to use.
"name": "tool_selection",
"description": "A list of actions to take.",
"parameters": {
"title": "tool_selection",
"description": "A list of actions to take.",
... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-5 | },
}
return [tool_selection]
[docs] def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
Args:
inte... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-6 | selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
predicted_mess... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
34ca081a3eeb-7 | cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_multi_agent/base.html |
3c6e30d19095-0 | Source code for langchain.agents.chat.output_parser
import json
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FINAL_ANSWER_ACTION = "Final Answer:"
[d... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/output_parser.html |
d87bd02f7a94-0 | Source code for langchain.agents.chat.base
from typing import Any, List, Optional, Sequence, Tuple
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.chat.output_parser import ChatOutputParser
from langchain.agents.chat.prompt import (
FORMAT_INSTRUCTIONS,
... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html |
d87bd02f7a94-1 | f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{agent_scratchpad}"
)
else:
return agent_scratchpad
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return ChatOutputParser()
@clas... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html |
d87bd02f7a94-2 | input_variables = ["input", "agent_scratchpad"]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager]... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/chat/base.html |
2da8aef2e1b7-0 | Source code for langchain.agents.conversational_chat.output_parser
from __future__ import annotations
from typing import Union
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_json_markdown
from lan... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/output_parser.html |
371e00018284-0 | Source code for langchain.agents.conversational_chat.base
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from lang... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html |
371e00018284-1 | return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, too... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html |
371e00018284-2 | ) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational_chat/base.html |
9acb55703625-0 | Source code for langchain.agents.react.output_parser
import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
[docs]class ReActOutputParser(AgentOutputParser):
[docs] def parse(self, text: str) -> Union[AgentA... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/output_parser.html |
20836c481a47-0 | Source code for langchain.agents.react.base
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from typing import Any, List, Optional, Sequence
from pydantic import Field
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types impo... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html |
20836c481a47-1 | super()._validate_tools(tools)
if len(tools) != 2:
raise ValueError(f"Exactly two tools must be specified, but got {tools}")
tool_names = {tool.name for tool in tools}
if tool_names != {"Lookup", "Search"}:
raise ValueError(
f"Tool names should be Lookup a... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html |
20836c481a47-2 | if term.lower() != self.lookup_str:
self.lookup_str = term.lower()
self.lookup_index = 0
else:
self.lookup_index += 1
lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]
if len(lookups) == 0:
return "No Results"
elif sel... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html |
20836c481a47-3 | raise ValueError(f"Tool name should be Play, got {tool_names}")
[docs]class ReActChain(AgentExecutor):
"""Chain that implements the ReAct paper.
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
react = ReAct(llm=OpenAI())
"""
def __init__(self, llm... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/react/base.html |
ef3bf11a48b2-0 | Source code for langchain.agents.conversational.output_parser
import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
[docs]class ConvoOutpu... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/output_parser.html |
2a188175a89b-0 | Source code for langchain.agents.conversational.base
"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from pydantic import Field
from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html |
2a188175a89b-1 | [docs] @classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
ai_prefix: str = "AI",
human_prefix: str = "Human",
input_variables: Optional[List[str... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html |
2a188175a89b-2 | validate_tools_single_input(cls.__name__, tools)
[docs] @classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: s... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html |
9592a1d8c1c9-0 | Source code for langchain.agents.openai_functions_agent.base
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import root_validator
fr... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-1 | ]
else:
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
obs... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-2 | function_name = function_call["name"]
try:
_tool_input = json.loads(function_call["arguments"])
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-3 | of the variables. For an easy way to construct this prompt, use
`OpenAIFunctionsAgent.create_prompt(...)`
"""
llm: BaseLanguageModel
tools: Sequence[BaseTool]
prompt: BasePromptTemplate
[docs] def get_allowed_tools(self) -> List[str]:
"""Get allowed tools."""
return list([... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-4 | **kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-5 | )
agent_decision = _parse_ai_message(predicted_message)
return agent_decision
[docs] @classmethod
def create_prompt(
cls,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
extra_prompt_messages: Option... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
9592a1d8c1c9-6 | """Construct an agent from an LLM and tools."""
if not isinstance(llm, ChatOpenAI):
raise ValueError("Only supported with ChatOpenAI models.")
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
ret... | https://api.python.langchain.com/en/latest/_modules/langchain/agents/openai_functions_agent/base.html |
41a69e91d865-0 | Source code for langchain.graphs.networkx_graph
"""Networkx wrapper for graph operations."""
from __future__ import annotations
from typing import Any, List, NamedTuple, Optional, Tuple
KG_TRIPLE_DELIMITER = "<|>"
[docs]class KnowledgeTriple(NamedTuple):
"""A triple in the graph."""
subject: str
predicate: ... | https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html |
41a69e91d865-1 | """Create a new graph."""
try:
import networkx as nx
except ImportError:
raise ImportError(
"Could not import networkx python package. "
"Please install it with `pip install networkx`."
)
if graph is not None:
if not... | https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html |
41a69e91d865-2 | if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_)
def get_triples(self) -> List[Tuple[str, str, str]]:
"""Get all triples in the graph."""
return [(u, v, d["relation"]) for u, v, d in s... | https://api.python.langchain.com/en/latest/_modules/langchain/graphs/networkx_graph.html |
d7082c128b0b-0 | Source code for langchain.chat_models.google_palm
"""Wrapper around Google's PaLM Chat API."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-1 | if not response.candidates:
raise ChatGooglePalmError("ChatResponse must have at least one candidate.")
generations: List[ChatGeneration] = []
for candidate in response.candidates:
author = candidate.get("author")
if author is None:
raise ChatGooglePalmError(f"ChatResponse mu... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-2 | if isinstance(input_message, SystemMessage):
if index != 0:
raise ChatGooglePalmError("System message must be first input message.")
context = input_message.content
elif isinstance(input_message, HumanMessage) and input_message.example:
if messages:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-3 | "Messages without an explicit role not supported by PaLM API."
)
return genai.types.MessagePromptDict(
context=context,
examples=examples,
messages=messages,
)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to h... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-4 | async def _achat_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.chat_async(**kwargs)
return await _achat_with_retry(**kwargs)
[docs]class ChatGooglePalm(BaseChatModel, BaseModel):
"""Wrapper around Google's PaL... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-5 | not return the full n completions if duplicates are generated."""
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k."""
google_api_key = get_from_dict_or_env(
values, "google_api_key"... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
d7082c128b0b-6 | self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
**kwargs,
)
return _response_to_result(response, stop)
async def _agenerate(
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
c7eea902d638-0 | Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langch... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7eea902d638-1 | message_text = f"{self.AI_PROMPT} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7eea902d638-2 | run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
if se... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
c7eea902d638-3 | delta,
)
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
[docs] def get_num_tokens(self, text: str)... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
ed25c0639cc7-0 | Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models import ChatOpenAI
from langchain.sch... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ed25c0639cc7-1 | **kwargs: Any
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
ed25c0639cc7-2 | request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
247f1d3da487-0 | Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import ChatResult
from langchain.utils... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
247f1d3da487-1 | openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
openai_proxy: str = ""
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
247f1d3da487-2 | except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
ad47a5d5fbd1-0 | Source code for langchain.chat_models.vertexai
"""Wrapper around Google VertexAI chat-based models."""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManage... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
ad47a5d5fbd1-1 | ValueError: If a sequence of message is odd, or a human message is not followed
by a message from AI (e.g., Human, Human, AI or AI, AI, Human).
"""
if not history:
return _ChatHistory()
first_message = history[0]
system_message = first_message if isinstance(first_message, SystemMessa... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
ad47a5d5fbd1-2 | else:
from vertexai.preview.language_models import ChatModel
values["client"] = ChatModel.from_pretrained(values["model_name"])
except ImportError:
raise_vertex_import_error()
return values
def _generate(
self,
messages: List[BaseMessage],
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
ad47a5d5fbd1-3 | chat._history.append((pair.question.content, pair.answer.content))
response = chat.send_message(question.content, **params)
text = self._enforce_stop_words(response.text, stop)
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
async def _agenerate(
self... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
6b438210d91b-0 | Source code for langchain.chat_models.base
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.base_language import BaseL... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-1 | )
values["callbacks"] = values.pop("callback_manager", None)
return values
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
return {}
def _ge... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-2 | self.verbose,
tags,
self.tags,
)
run_managers = callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = []
for i, m in enumerate(messages):
try:
results.appe... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-3 | ) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop)
options = {"stop": stop}
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
)... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-4 | generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
await asyncio.gather(
*[
run_manager.on_llm_end(flattened_output)
for run_manager, flattened_output in zip(
run_managers, ... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-5 | "run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-6 | )
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"As... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-7 | self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
[docs] def __call__(
self,
messages: List[BaseMessage],
stop: Opti... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-8 | ) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
return result.content
[docs] def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Opt... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
6b438210d91b-9 | """Return type of chat model."""
[docs] def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
[docs]class SimpleChatModel(BaseChatModel):
def _generate(
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/base.html |
0f3de4aa2d61-0 | Source code for langchain.chat_models.fake
"""Fake ChatModel for testing purposes."""
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import BaseMessage
[docs]class FakeListChatM... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/fake.html |
0182f441fbdb-0 | Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Field, root_validator
from tenac... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-1 | return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-2 | elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-3 | Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_serializable(self) -... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-4 | max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default,... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-5 | )
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead t... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-6 | "due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when strea... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-7 | ),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-8 | role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content") or ""
inner_completion += token
_function_call = stream_resp["choices"][0]["delta"].get("function_call")
if _function_call:
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-9 | gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-10 | return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, A... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-11 | # gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
0182f441fbdb-12 | return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model.startswith("gpt-3.5-turbo"):
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
... | https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
8d1dda5f1ede-0 | Source code for langchain.llms.gpt4all
"""Wrapper for the GPT4All model."""
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
8d1dda5f1ede-1 | logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
8d1dda5f1ede-2 | starting from beginning if the context has run out."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
client: Any = None #: :meta private:
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmet... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
8d1dda5f1ede-3 | model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
)
if values["n_threads"] is not None:
# set n_threads
... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
8d1dda5f1ede-4 | The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, v... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/gpt4all.html |
1421f95d0721-0 | Source code for langchain.llms.utils
"""Common utility functions for working with LLM APIs."""
import re
from typing import List
def enforce_stop_tokens(text: str, stop: List[str]) -> str:
    """Cut off ``text`` at the first occurrence of any stop sequence.

    Args:
        text: The raw completion text returned by the LLM.
        stop: Stop sequences; ``text`` is truncated just before the
            earliest match of any of them.

    Returns:
        The prefix of ``text`` up to (not including) the first stop
        sequence, or ``text`` unchanged when no stop sequence occurs
        (or ``stop`` is empty).
    """
    # Guard: "|".join([]) would produce an empty pattern that matches at
    # every position, making the split return "" instead of the text.
    if not stop:
        return text
    # re.escape is required: stop sequences are plain strings, not regexes.
    # Without it, a metacharacter in a stop token (e.g. "|", ".", "?")
    # silently corrupts the alternation pattern or raises re.error.
    pattern = "|".join(re.escape(s) for s in stop)
    return re.split(pattern, text)[0]
c4593bf528e2-0 | Source code for langchain.llms.databricks
import os
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langch... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-1 | return values
def post(self, request: Any) -> Any:
# See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
wrapped_request = {"dataframe_records": [request]}
response = self.post_raw(wrapped_request)["predictions"]
# For a single-record que... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-2 | """Gets the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv("DATABRICKS_HOST")
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("contex... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-3 | * **Serving endpoint** (recommended for both production and development).
We assume that an LLM was registered and deployed to a serving endpoint.
To wrap it as an LLM you must have "Can Query" permission to the endpoint.
Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
``clus... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-4 | If the endpoint model signature is different or you want to set extra params,
you can use `transform_input_fn` and `transform_output_fn` to apply necessary
transformations before and after the query.
"""
host: str = Field(default_factory=get_default_host)
"""Databricks workspace hostname.
If not... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-5 | You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_driver_port: Optional[str] = None
"""The port number used by the HTTP server running on the cluster driver node.
The server should listen on the driver IP address or simply ``0.0.0.0`` to connect.
We recommend the server using a... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-6 | except Exception as e:
raise ValueError(
"Neither endpoint_name nor cluster_id was set. "
"And the cluster_id cannot be automatically determined. Received"
f" error: {e}"
)
[docs] @validator("cluster_driver_port", always=True... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
c4593bf528e2-7 | api_token=self.api_token,
cluster_id=self.cluster_id,
cluster_driver_port=self.cluster_driver_port,
)
else:
raise ValueError(
"Must specify either endpoint_name or cluster_id/cluster_driver_port."
)
@property
def _llm_ty... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/databricks.html |
2ed5641a091b-0 | Source code for langchain.llms.huggingface_endpoint
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html |
2ed5641a091b-1 | [docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
[docs] @root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_o... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html |
2ed5641a091b-2 | def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html |
2ed5641a091b-3 | text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if ... | https://api.python.langchain.com/en/latest/_modules/langchain/llms/huggingface_endpoint.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.