id stringlengths 14 15 | text stringlengths 35 2.51k | source stringlengths 61 154 |
|---|---|---|
1d81dac07857-4 | async def _arun(
self,
url: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Run the tool asynchronously."""
return await self.requests_wrapper.adelete(_clean_url(url)) | https://api.python.langchain.com/en/latest/_modules/langchain/tools/requests/tool.html |
d41630dabb12-0 | Source code for langchain.tools.brave_search.tool
from __future__ import annotations
from typing import Any, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.brave_search import Brav... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/brave_search/tool.html |
020fce48dd9e-0 | Source code for langchain.tools.sleep.tool
"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/sleep/tool.html |
4a63ccd5f4e7-0 | Source code for langchain.tools.sql_database.tool
# flake8: noqa
"""Tools for interacting with a SQL database."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncC... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html |
4a63ccd5f4e7-1 | """Execute the query, return the results or an error message."""
return self.db.run_no_throw(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError("QuerySqlDbTool does not support async"... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html |
4a63ccd5f4e7-2 | ) -> str:
"""Get the schema for a specific table."""
return ", ".join(self.db.get_usable_table_names())
async def _arun(
self,
tool_input: str = "",
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError("ListTablesSqlDbT... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html |
4a63ccd5f4e7-3 | )
return values
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the LLM to check the query."""
return self.llm_chain.predict(query=query, dialect=self.db.dialect)
async def _arun(
self,
quer... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/sql_database/tool.html |
e50ba1e48ae8-0 | Source code for langchain.tools.jira.tool
"""
This tool allows agents to interact with the atlassian-python-api library
and operate on a Jira instance. For more information on the
atlassian-python-api library, see https://atlassian-python-api.readthedocs.io/jira.html
To use this tool, you must first set as environment ... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/jira/tool.html |
e50ba1e48ae8-1 | """Use the Atlassian Jira API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
async def _arun(
self,
_: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the Atlassian Jira API to run an operation."""
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/jira/tool.html |
998581c9d35c-0 | Source code for langchain.tools.office365.utils
"""O365 tool utils."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from O365 import Account
logger = logging.getLogger(__name__)
[docs]def clean_body(body: str) -> str:
"""Clean body of a message o... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html |
998581c9d35c-1 | if account.is_authenticated is False:
if not account.authenticate(
scopes=[
"https://graph.microsoft.com/Mail.ReadWrite",
"https://graph.microsoft.com/Mail.Send",
"https://graph.microsoft.com/Calendars.ReadWrite",
"https://graph.microso... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/utils.html |
dff4a8d835e4-0 | Source code for langchain.tools.office365.create_draft_message
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool
[docs]class ... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html |
dff4a8d835e4-1 | # Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
            message.bcc.add(bcc)
message.save_draft()
output = "Draft created: " + str(message)
re... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/create_draft_message.html |
0446343462b2-0 | Source code for langchain.tools.office365.send_event
"""Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from lan... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html |
0446343462b2-1 | description=" The end datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html |
0446343462b2-2 | # TO-DO: Look into PytzUsageWarning
event.save()
output = "Event sent: " + str(event)
return output
async def _arun(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_m... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_event.html |
52f14fc1be3f-0 | Source code for langchain.tools.office365.events_search
"""Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseMode... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html |
52f14fc1be3f-1 | " components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC)."
)
)
max_results: int = F... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html |
52f14fc1be3f-2 | extra = Extra.forbid
def _run(
self,
start_datetime: str,
end_datetime: str,
max_results: int = 10,
truncate: bool = True,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
TRUNCATE_LIMIT = 150
# Get calendar objec... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html |
52f14fc1be3f-3 | "%Y-%m-%dT%H:%M:%S%z"
)
output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
"%Y-%m-%dT%H:%M:%S%z"
)
output_event["modified_date"] = event.modified.astimezone(
time_zone
).strftime("%Y-%m-%dT%H:%M:%S%z")
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/events_search.html |
708d00a0d1e4-0 | Source code for langchain.tools.office365.messages_search
"""Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel, Extra, Field
from langchain.call... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html |
708d00a0d1e4-1 | "range example: received:2023-06-08..2023-06-09 matching example: "
"from:amy OR from:david."
)
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
truncate: bool = Field(
default=True,
description=(
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html |
708d00a0d1e4-2 | if folder != "":
mailbox = mailbox.get_folder(folder_name=folder)
# Retrieve messages based on query
query = mailbox.q().search(query)
messages = mailbox.get_messages(limit=max_results, query=query)
# Generate output dict
output_messages = []
for message in me... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/messages_search.html |
8ef45f3cc468-0 | Source code for langchain.tools.office365.base
"""Base class for Gmail tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from pydantic import Field
from langchain.tools.base import BaseTool
from langchain.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Accoun... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/base.html |
b2e89f17bfef-0 | Source code for langchain.tools.office365.send_message
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.office365.base import O365BaseTool
[docs]class SendMess... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html |
b2e89f17bfef-1 | message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
            message.bcc.add(bcc)
message.send()
output = "Message sent: " + str(message)
return output
async def _arun(
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/office365/send_message.html |
304441ecf07d-0 | Source code for langchain.tools.pubmed.tool
"""Tool for the Pubmed API."""
from typing import Optional
from pydantic import Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.pupmed impor... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/pubmed/tool.html |
8311156d248b-0 | Source code for langchain.tools.steamship_image_generation.utils
"""Steamship Utils."""
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from steamship import Block, Steamship
[docs]def make_image_public(client: Steamship, block: Block) -> str:
"""Upload a block ... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/utils.html |
8d501f43f946-0 | Source code for langchain.tools.steamship_image_generation.tool
"""This tool allows agents to generate images using Steamship.
Steamship offers access to different third party image generation APIs
using a single API key.
Today the following models are supported:
- Dall-E
- Stable Diffusion
To use this tool, you must f... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html |
8d501f43f946-1 | description = (
"Useful for when you need to generate an image."
"Input: A detailed text-2-image prompt describing an image"
"Output: the UUID of a generated image"
)
[docs] @root_validator(pre=True)
def validate_size(cls, values: Dict) -> Dict:
if "size" in values:
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html |
8d501f43f946-2 | )
task = image_generator.generate(text=query, append_output_to_file=True)
task.wait()
blocks = task.output.blocks
if len(blocks) > 0:
if self.return_urls:
return make_image_public(self.steamship, blocks[0])
else:
return blocks[0].id... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/steamship_image_generation/tool.html |
248ef5718c2b-0 | Source code for langchain.tools.powerbi.tool
"""Tools for interacting with a Power BI dataset."""
import logging
from time import perf_counter
from typing import Any, Dict, Optional, Tuple
from pydantic import Field, validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackMan... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-1 | def validate_llm_chain_input_variables( # pylint: disable=E0213
cls, llm_chain: LLMChain
) -> LLMChain:
"""Make sure the LLM chain has the correct input variables."""
if llm_chain.prompt.input_variables != [
"tool_input",
"tables",
"schemas",
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-2 | tables=self.powerbi.get_table_names(),
schemas=self.powerbi.get_schemas(),
examples=self.examples,
)
except Exception as exc: # pylint: disable=broad-except
self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
return self.session_ca... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-3 | async def _arun(
self,
tool_input: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Execute the query, return the results or an error message."""
if cache := self._check_cache(tool_input):
logger.debug("Found c... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-4 | return self.session_cache[tool_input]
iterations = kwargs.get("iterations", 0)
if error and iterations < self.max_iterations:
return await self._arun(
tool_input=RETRY_RESPONSE.format(
tool_input=tool_input, query=query, error=error
),
... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-5 | Example Input: "table1, table2, table3"
""" # noqa: E501
powerbi: PowerBIDataset = Field(exclude=True)
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _run(
self,
tool_input: str,
run_manager: Optional[Callback... | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
248ef5718c2b-6 | self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Get the names of the tables."""
return ", ".join(self.powerbi.get_table_names()) | https://api.python.langchain.com/en/latest/_modules/langchain/tools/powerbi/tool.html |
16d0d2a3d814-0 | langchain.chat_models.vertexai.ChatVertexAI¶
class langchain.chat_models.vertexai.ChatVertexAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = Non... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html |
16d0d2a3d814-1 | The amount of parallelism allowed for requests issued to VertexAI models.
param stop: Optional[List[str]] = None¶
Optional list of stop words to use when generating.
param tags: Optional[List[str]] = None¶
Tags to add to the run trace.
param temperature: float = 0.0¶
Sampling temperature, it controls the degree of rand... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html |
16d0d2a3d814-2 | Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶
Predict message from messages.
call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶
dict(**kwargs: Any) → Dict¶
Return a dictionary of the LLM.... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html |
16d0d2a3d814-3 | to_json_not_implemented() → SerializedNotImplemented¶
validator validate_environment » all fields[source]¶
Validate that the python package exists in environment.
property is_codey_model: bool¶
property lc_attributes: Dict¶
Return a list of attribute names that should be included in the
serialized kwargs. These attri... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.vertexai.ChatVertexAI.html |
40d7032c2060-0 | langchain.chat_models.fake.FakeListChatModel¶
class langchain.chat_models.fake.FakeListChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = N... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html |
40d7032c2060-1 | Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶
Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html |
40d7032c2060-2 | Predict message from messages.
validator raise_deprecation » all fields¶
Raise deprecation warning if callback_manager is used.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
property lc_attributes: Dict¶
Return a list of attribute names that ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.fake.FakeListChatModel.html |
38d3b4101d5d-0 | langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI¶
class langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] =... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html |
38d3b4101d5d-1 | returned in the generation_info field of the
Generation object.
Example
from langchain.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
Create a new model by parsing and validating input data from keyword arguments.
Raises ValidationError if the input data cannot be pa... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html |
38d3b4101d5d-2 | param tags: Optional[List[str]] = None¶
Tags to add to the run trace.
param temperature: float = 0.7¶
What sampling temperature to use.
param tiktoken_model_name: Optional[str] = None¶
The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html |
38d3b4101d5d-3 | Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶
Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html |
38d3b4101d5d-4 | Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
get_token_ids(text: str) → List[int]¶
Get the tokens present in the text with tiktoken package.
predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶
Predict tex... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.promptlayer_openai.PromptLayerChatOpenAI.html |
ec65df64c072-0 | langchain.chat_models.azure_openai.AzureChatOpenAI¶
class langchain.chat_models.azure_openai.AzureChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[Li... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
ec65df64c072-1 | 35-turbo-dev, the constructor should look like:
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Crea... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
ec65df64c072-2 | Timeout for requests to OpenAI completion API. Default is 600 seconds.
param streaming: bool = False¶
Whether to stream the results or not.
param tags: Optional[List[str]] = None¶
Tags to add to the run trace.
param temperature: float = 0.7¶
What sampling temperature to use.
param tiktoken_model_name: Optional[str] = N... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
ec65df64c072-3 | Top Level call
async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶
Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
ec65df64c072-4 | Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶
Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
get_to... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
ec65df64c072-5 | model Config¶
Bases: object
Configuration for this pydantic object.
allow_population_by_field_name = True¶ | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.azure_openai.AzureChatOpenAI.html |
7f29db8b29d6-0 | langchain.chat_models.anthropic.ChatAnthropic¶
class langchain.chat_models.anthropic.ChatAnthropic(*, client: Any = None, model: str = 'claude-v1', max_tokens_to_sample: int = 256, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, streaming: bool = False, default_request_t... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html |
7f29db8b29d6-1 | param anthropic_api_url: Optional[str] = None¶
param cache: Optional[bool] = None¶
param callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None¶
param callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None¶
param count... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html |
7f29db8b29d6-2 | Top Level call
async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶
Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html |
7f29db8b29d6-3 | Get the token present in the text.
predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶
Predict text from text.
predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶
Predict message from messages.
validator raise_deprecation » ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.anthropic.ChatAnthropic.html |
13a3376ec55c-0 | langchain.chat_models.base.BaseChatModel¶
class langchain.chat_models.base.BaseChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None)[sou... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html |
13a3376ec55c-1 | Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str[source]¶
Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html |
13a3376ec55c-2 | Predict text from text.
predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage[source]¶
Predict message from messages.
validator raise_deprecation » all fields[source]¶
Raise deprecation warning if callback_manager is used.
to_json() → Union[SerializedConst... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.BaseChatModel.html |
363a4d1a6b23-0 | langchain.chat_models.google_palm.ChatGooglePalmError¶
class langchain.chat_models.google_palm.ChatGooglePalmError[source]¶
Bases: Exception
Error raised when there is an issue with the Google PaLM API.
add_note()¶
Exception.add_note(note) –
add a note to the exception
with_traceback()¶
Exception.with_traceback(tb) –
s... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalmError.html |
816072f55a9f-0 | langchain.chat_models.openai.ChatOpenAI¶
class langchain.chat_models.openai.ChatOpenAI(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, clien... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html |
816072f55a9f-1 | param callbacks: Callbacks = None¶
param max_retries: int = 6¶
Maximum number of retries to make when generating.
param max_tokens: Optional[int] = None¶
Maximum number of tokens to generate.
param model_kwargs: Dict[str, Any] [Optional]¶
Holds any model parameters valid for create call not explicitly specified.
param ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html |
816072f55a9f-2 | supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here.
param verbose: bool [Optional]¶
Whethe... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html |
816072f55a9f-3 | completion_with_retry(**kwargs: Any) → Any[source]¶
Use tenacity to retry the completion call.
dict(**kwargs: Any) → Dict¶
Return a dictionary of the LLM.
generate(messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, t... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html |
816072f55a9f-4 | to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
validator validate_environment » all fields[source]¶
Validate that api key and python package exists in environment.
property lc_attributes: Dict¶
Return a list of attribute names that should be i... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.openai.ChatOpenAI.html |
96f64fcf452a-0 | langchain.chat_models.google_palm.chat_with_retry¶
langchain.chat_models.google_palm.chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) → Any[source]¶
Use tenacity to retry the completion call. | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.chat_with_retry.html |
32b03e159cd7-0 | langchain.chat_models.google_palm.ChatGooglePalm¶
class langchain.chat_models.google_palm.ChatGooglePalm(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[s... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html |
32b03e159cd7-1 | param tags: Optional[List[str]] = None¶
Tags to add to the run trace.
param temperature: Optional[float] = None¶
Run inference with this temperature. Must by in the closed
interval [0.0, 1.0].
param top_k: Optional[int] = None¶
Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positiv... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html |
32b03e159cd7-2 | Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶
Predict message from messages.
call_as_llm(message: str, stop: Optional[List[str]] = None, **kwargs: Any) → str¶
dict(**kwargs: Any) → Dict¶
Return a dictionary of the LLM.... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html |
32b03e159cd7-3 | to_json_not_implemented() → SerializedNotImplemented¶
validator validate_environment » all fields[source]¶
Validate api key, python package exists, temperature, top_p, and top_k.
property lc_attributes: Dict¶
Return a list of attribute names that should be included in the
serialized kwargs. These attributes must be a... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.google_palm.ChatGooglePalm.html |
f0231b42cecb-0 | langchain.chat_models.base.SimpleChatModel¶
class langchain.chat_models.base.SimpleChatModel(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None)... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html |
f0231b42cecb-1 | Take in a list of prompt values and return an LLMResult.
classmethod all_required_field_names() → Set¶
async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶
Predict text from text.
async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html |
f0231b42cecb-2 | Predict message from messages.
validator raise_deprecation » all fields¶
Raise deprecation warning if callback_manager is used.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
property lc_attributes: Dict¶
Return a list of attribute names that ... | https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.SimpleChatModel.html |
fd7e90e3fd56-0 | langchain.utils.raise_for_status_with_text¶
langchain.utils.raise_for_status_with_text(response: Response) → None[source]¶
Raise an error with the response text. | https://api.python.langchain.com/en/latest/utils/langchain.utils.raise_for_status_with_text.html |
3af79e427c87-0 | langchain.utils.stringify_value¶
langchain.utils.stringify_value(val: Any) → str[source]¶
Stringify a value.
Parameters
val – The value to stringify.
Returns
The stringified value.
Return type
str | https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_value.html |
b5ac27728bb0-0 | langchain.utils.guard_import¶
langchain.utils.guard_import(module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None) → Any[source]¶
Dynamically imports a module and raises a helpful exception if the module is not
installed. | https://api.python.langchain.com/en/latest/utils/langchain.utils.guard_import.html |
d9b2f9df49f1-0 | langchain.utils.comma_list¶
langchain.utils.comma_list(items: List[Any]) → str[source]¶ | https://api.python.langchain.com/en/latest/utils/langchain.utils.comma_list.html |
d766e747edad-0 | langchain.utils.stringify_dict¶
langchain.utils.stringify_dict(data: dict) → str[source]¶
Stringify a dictionary.
Parameters
data – The dictionary to stringify.
Returns
The stringified dictionary.
Return type
str | https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_dict.html |
45098d1643d1-0 | langchain.utils.get_from_dict_or_env¶
langchain.utils.get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None) → str[source]¶
Get a value from a dictionary or an environment variable. | https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_dict_or_env.html |
b3873011e843-0 | langchain.utils.get_from_env¶
langchain.utils.get_from_env(key: str, env_key: str, default: Optional[str] = None) → str[source]¶
Get a value from a dictionary or an environment variable. | https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_env.html |
c0b04f25fdd4-0 | langchain.utils.mock_now¶
langchain.utils.mock_now(dt_value)[source]¶
Context manager for mocking out datetime.now() in unit tests.
Example:
with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11) | https://api.python.langchain.com/en/latest/utils/langchain.utils.mock_now.html |
bb116e03c8b6-0 | langchain.utils.xor_args¶
langchain.utils.xor_args(*arg_groups: Tuple[str, ...]) → Callable[source]¶
Validate specified keyword args are mutually exclusive. | https://api.python.langchain.com/en/latest/utils/langchain.utils.xor_args.html |
08af877cbb86-0 | langchain.llms.azureml_endpoint.DollyContentFormatter¶
class langchain.llms.azureml_endpoint.DollyContentFormatter[source]¶
Bases: ContentFormatterBase
Content handler for the Dolly-v2-12b model
Methods
__init__()
format_request_payload(prompt, model_kwargs)
Formats the request body according to the input schema of the... | https://api.python.langchain.com/en/latest/llms/langchain.llms.azureml_endpoint.DollyContentFormatter.html |
821ea6ff17fb-0 | langchain.llms.google_palm.GooglePalm¶
class langchain.llms.google_palm.GooglePalm(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, client: A... | https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html |
821ea6ff17fb-1 | [0.0, 1.0].
param top_k: Optional[int] = None¶
Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive.
param top_p: Optional[float] = None¶
Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0... | https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html |
821ea6ff17fb-2 | Predict message from messages.
dict(**kwargs: Any) → Dict¶
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶
Run the LLM on the... | https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html |
821ea6ff17fb-3 | validator set_verbose » verbose¶
If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
validator validate_environment » all fields[source]¶
Valid... | https://api.python.langchain.com/en/latest/llms/langchain.llms.google_palm.GooglePalm.html |
d281e79cc2b9-0 | langchain.llms.deepinfra.DeepInfra¶
class langchain.llms.deepinfra.DeepInfra(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, model_id: str =... | https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html |
d281e79cc2b9-1 | param verbose: bool [Optional]¶
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[st... | https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html |
d281e79cc2b9-2 | Run the LLM on the given prompt and input.
generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int... | https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html |
d281e79cc2b9-3 | property lc_attributes: Dict¶
Return a list of attribute names that should be included in the
serialized kwargs. These attributes must be accepted by the
constructor.
property lc_namespace: List[str]¶
Return the namespace of the langchain object.
eg. [“langchain”, “llms”, “openai”]
property lc_secrets: Dict[str, str]¶
... | https://api.python.langchain.com/en/latest/llms/langchain.llms.deepinfra.DeepInfra.html |
a63c9e4a9bb8-0 | langchain.llms.azureml_endpoint.OSSContentFormatter¶
class langchain.llms.azureml_endpoint.OSSContentFormatter[source]¶
Bases: ContentFormatterBase
Content handler for LLMs from the OSS catalog.
Methods
__init__()
format_request_payload(prompt, model_kwargs)
Formats the request body according to the input schema of the... | https://api.python.langchain.com/en/latest/llms/langchain.llms.azureml_endpoint.OSSContentFormatter.html |
31ee3d02f087-0 | langchain.llms.anyscale.Anyscale¶
class langchain.llms.anyscale.Anyscale(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, model_kwargs: Optio... | https://api.python.langchain.com/en/latest/llms/langchain.llms.anyscale.Anyscale.html |
31ee3d02f087-1 | param callbacks: Callbacks = None¶
param model_kwargs: Optional[dict] = None¶
Key word arguments to pass to the model. Reserved for future use
param tags: Optional[List[str]] = None¶
Tags to add to the run trace.
param verbose: bool [Optional]¶
Whether to print out response text.
__call__(prompt: str, stop: Optional[Li... | https://api.python.langchain.com/en/latest/llms/langchain.llms.anyscale.Anyscale.html |
31ee3d02f087-2 | dict(**kwargs: Any) → Dict¶
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, **kwargs: Any) → LLMResult¶
Run the LLM on the given prompt and input.
genera... | https://api.python.langchain.com/en/latest/llms/langchain.llms.anyscale.Anyscale.html |
31ee3d02f087-3 | This allows users to pass in None as verbose to access the global setting.
to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶
to_json_not_implemented() → SerializedNotImplemented¶
validator validate_environment » all fields[source]¶
Validate that api key and python package exists in environment.
prop... | https://api.python.langchain.com/en/latest/llms/langchain.llms.anyscale.Anyscale.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.