| sample_id | text | metadata | category |
|---|---|---|---|
langchain-ai/langchain:libs/langchain/langchain_classic/tools/human/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import HumanInputRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"HumanInputRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"HumanInputRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/human/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
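All of these shim modules lean on PEP 562 module-level `__getattr__`: the deprecated name is absent from the shim's own namespace, so attribute access falls through to `__getattr__`, which forwards the lookup to the new location. A minimal usage sketch, assuming `langchain` and `langchain_community` are both installed:

```python
# Accessing the deprecated name triggers the module-level __getattr__
# (PEP 562), which resolves it from its new home in langchain_community
# and emits a deprecation warning along the way.
from langchain_classic.tools.human import tool

human_tool_cls = tool.HumanInputRun  # forwarded, with a warning
```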
langchain-ai/langchain:libs/langchain/langchain_classic/tools/interaction/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import StdInInquireTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"StdInInquireTool": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StdInInquireTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/interaction/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/jira/tool.py | """This module provides dynamic access to deprecated Jira tools.
When attributes like `JiraAction` are accessed, they are redirected to their new
locations in `langchain_community.tools`. This ensures backward compatibility
while warning developers about deprecation.
Attributes:
JiraAction (deprecated): Dynamically loaded from langchain_community.tools.
"""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JiraAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JiraAction": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Dynamically retrieve attributes from the updated module path.
Args:
name: The name of the attribute to import.
Returns:
The resolved attribute from the updated path.
"""
return _import_attribute(name)
__all__ = [
"JiraAction",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/jira/tool.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
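The `create_importer` factory from `langchain_classic._api` is used in every sample but its source is not shown here. The following is a plausible sketch of what it could do, inferred from the call sites above; all names and behavior below are assumptions, not the library's actual implementation:

```python
import importlib
import warnings
from typing import Any, Callable, Optional


def create_importer(
    package: str,
    *,
    deprecated_lookups: Optional[dict[str, str]] = None,
) -> Callable[[str], Any]:
    """Hypothetical factory mirroring how the shim modules call it."""
    lookups = deprecated_lookups or {}

    def import_attribute(name: str) -> Any:
        if name in lookups:
            new_module = lookups[name]
            # Warn on each access, pointing callers at the new path.
            warnings.warn(
                f"Importing {name} from {package} is deprecated; "
                f"import it from {new_module} instead.",
                DeprecationWarning,
                stacklevel=3,
            )
            module = importlib.import_module(new_module)
            return getattr(module, name)
        msg = f"module {package!r} has no attribute {name!r}"
        raise AttributeError(msg)

    return import_attribute
```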
langchain-ai/langchain:libs/langchain/langchain_classic/tools/json/tool.py | """This module provides dynamic access to deprecated JSON tools in LangChain.
It ensures backward compatibility by forwarding references such as
`JsonGetValueTool`, `JsonListKeysTool`, and `JsonSpec` to their updated
locations within the `langchain_community.tools` namespace.
This setup allows legacy code to continue working while guiding developers
toward using the updated module paths.
"""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JsonGetValueTool, JsonListKeysTool
from langchain_community.tools.json.tool import JsonSpec
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JsonSpec": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools",
"JsonGetValueTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Dynamically retrieve attributes from the updated module path.
This function resolves deprecated attribute imports at runtime
and forwards them to their new locations.
Args:
name: The name of the attribute to import.
Returns:
The resolved attribute from the appropriate updated module.
"""
return _import_attribute(name)
__all__ = [
"JsonGetValueTool",
"JsonListKeysTool",
"JsonSpec",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/json/tool.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
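The `if TYPE_CHECKING:` guard in each shim keeps the `langchain_community` import visible to static type checkers without ever executing it at runtime, so the shim loads even when the community package is not installed. The same pattern in isolation; the module path is real, but the function and the attribute access are illustrative assumptions:

```python
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Evaluated by mypy/pyright only; never imported at runtime.
    from langchain_community.tools.json.tool import JsonSpec


def keys_at_root(spec: "JsonSpec") -> Any:
    # The string annotation keeps runtime free of the community import;
    # the attribute access below is an assumption for illustration.
    return list(spec.dict_)
```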
langchain-ai/langchain:libs/langchain/langchain_classic/tools/memorize/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.memorize.tool import Memorize, TrainableLLM
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TrainableLLM": "langchain_community.tools.memorize.tool",
"Memorize": "langchain_community.tools.memorize.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Memorize",
"TrainableLLM",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/memorize/tool.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/merriam_webster/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import MerriamWebsterQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"MerriamWebsterQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MerriamWebsterQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/merriam_webster/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/metaphor_search/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import MetaphorSearchResults
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"MetaphorSearchResults": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MetaphorSearchResults",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/metaphor_search/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/nasa/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import NasaAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NasaAction": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NasaAction",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/nasa/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/nuclia/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.nuclia.tool import NUASchema, NucliaUnderstandingAPI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NUASchema": "langchain_community.tools.nuclia.tool",
"NucliaUnderstandingAPI": "langchain_community.tools.nuclia.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NUASchema",
"NucliaUnderstandingAPI",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/nuclia/tool.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/office365/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.office365.base import O365BaseTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"O365BaseTool": "langchain_community.tools.office365.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365BaseTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/office365/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/office365/send_message.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SendMessage
from langchain_community.tools.office365.send_message import SendMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendMessageSchema": "langchain_community.tools.office365.send_message",
"O365SendMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SendMessage",
"SendMessageSchema",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/office365/send_message.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/openweathermap/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import OpenWeatherMapQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"OpenWeatherMapQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OpenWeatherMapQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/openweathermap/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/playwright/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.playwright.base import BaseBrowserTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"BaseBrowserTool": "langchain_community.tools.playwright.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseBrowserTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/playwright/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/powerbi/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"QueryPowerBITool": "langchain_community.tools",
"InfoPowerBITool": "langchain_community.tools",
"ListPowerBITool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"InfoPowerBITool",
"ListPowerBITool",
"QueryPowerBITool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/powerbi/tool.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/pubmed/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import PubmedQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"PubmedQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PubmedQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/pubmed/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/reddit_search/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import RedditSearchRun, RedditSearchSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedditSearchSchema": "langchain_community.tools",
"RedditSearchRun": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedditSearchRun",
"RedditSearchSchema",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/reddit_search/tool.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/requests/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseRequestsTool,
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRequestsTool": "langchain_community.tools",
"RequestsGetTool": "langchain_community.tools",
"RequestsPostTool": "langchain_community.tools",
"RequestsPatchTool": "langchain_community.tools",
"RequestsPutTool": "langchain_community.tools",
"RequestsDeleteTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseRequestsTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/requests/tool.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/scenexplain/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SceneXplainTool
from langchain_community.tools.scenexplain.tool import SceneXplainInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SceneXplainInput": "langchain_community.tools.scenexplain.tool",
"SceneXplainTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SceneXplainInput",
"SceneXplainTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/scenexplain/tool.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/searchapi/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SearchAPIResults, SearchAPIRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearchAPIRun": "langchain_community.tools",
"SearchAPIResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearchAPIResults",
"SearchAPIRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/searchapi/tool.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/searx_search/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SearxSearchResults, SearxSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SearxSearchRun": "langchain_community.tools",
"SearxSearchResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SearxSearchResults",
"SearxSearchRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/searx_search/tool.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/shell/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ShellTool
from langchain_community.tools.shell.tool import ShellInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ShellInput": "langchain_community.tools.shell.tool",
"ShellTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ShellInput",
"ShellTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/shell/tool.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/slack/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.slack.base import SlackBaseTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SlackBaseTool": "langchain_community.tools.slack.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SlackBaseTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/slack/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/slack/get_message.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SlackGetMessage
from langchain_community.tools.slack.get_message import SlackGetMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SlackGetMessageSchema": "langchain_community.tools.slack.get_message",
"SlackGetMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SlackGetMessage",
"SlackGetMessageSchema",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/slack/get_message.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/slack/send_message.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SlackSendMessage
from langchain_community.tools.slack.send_message import SendMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendMessageSchema": "langchain_community.tools.slack.send_message",
"SlackSendMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SendMessageSchema",
"SlackSendMessage",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/slack/send_message.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/sleep/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SleepTool
from langchain_community.tools.sleep.tool import SleepInput
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SleepInput": "langchain_community.tools.sleep.tool",
"SleepTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SleepInput",
"SleepTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/sleep/tool.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/spark_sql/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSparkSQLTool,
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSparkSQLTool": "langchain_community.tools",
"QuerySparkSQLTool": "langchain_community.tools",
"InfoSparkSQLTool": "langchain_community.tools",
"ListSparkSQLTool": "langchain_community.tools",
"QueryCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSparkSQLTool",
"InfoSparkSQLTool",
"ListSparkSQLTool",
"QueryCheckerTool",
"QuerySparkSQLTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/spark_sql/tool.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/sql_database/prompt.py | """For backwards compatibility."""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
_importer = create_importer(
__package__,
deprecated_lookups={
"QUERY_CHECKER": "langchain_community.tools.sql_database.prompt",
},
)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _importer(name)
__all__ = ["QUERY_CHECKER"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/sql_database/prompt.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/sql_database/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSQLDatabaseTool,
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSQLDatabaseTool": "langchain_community.tools",
"QuerySQLDataBaseTool": "langchain_community.tools",
"InfoSQLDatabaseTool": "langchain_community.tools",
"ListSQLDatabaseTool": "langchain_community.tools",
"QuerySQLCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSQLDatabaseTool",
"InfoSQLDatabaseTool",
"ListSQLDatabaseTool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/sql_database/tool.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/stackexchange/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import StackExchangeTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"StackExchangeTool": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StackExchangeTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/stackexchange/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/steam/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SteamWebAPIQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SteamWebAPIQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SteamWebAPIQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/steam/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/steamship_image_generation/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import SteamshipImageGenerationTool
from langchain_community.tools.steamship_image_generation.tool import ModelName
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ModelName": "langchain_community.tools.steamship_image_generation.tool",
"SteamshipImageGenerationTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ModelName",
"SteamshipImageGenerationTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/steamship_image_generation/tool.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/tavily_search/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilyInput,
TavilySearchResults,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TavilyInput": "langchain_community.tools.tavily_search.tool",
"TavilySearchResults": "langchain_community.tools.tavily_search.tool",
"TavilyAnswer": "langchain_community.tools.tavily_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TavilyAnswer",
"TavilyInput",
"TavilySearchResults",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/tavily_search/tool.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/vectorstore/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"VectorStoreQATool": "langchain_community.tools",
"VectorStoreQAWithSourcesTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/vectorstore/tool.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/wikipedia/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import WikipediaQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"WikipediaQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WikipediaQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/wikipedia/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/wolfram_alpha/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import WolframAlphaQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"WolframAlphaQueryRun": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WolframAlphaQueryRun",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/wolfram_alpha/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/youtube/search.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import YouTubeSearchTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"YouTubeSearchTool": "langchain_community.tools"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"YouTubeSearchTool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/youtube/search.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/tools/zapier/tool.py | """This module provides dynamic access to deprecated Zapier tools in LangChain.
It supports backward compatibility by forwarding references such as
`ZapierNLAListActions` and `ZapierNLARunAction` to their updated locations
in the `langchain_community.tools` package.
Code that uses the older import paths will continue to function, while LangChain
internally redirects access to the newer, supported module structure.
"""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Dynamically retrieve attributes from the updated module path.
This function resolves deprecated attribute imports at runtime
and forwards them to their new locations.
Args:
name: The name of the attribute to import.
Returns:
The resolved attribute from the appropriate updated module.
"""
return _import_attribute(name)
__all__ = [
"ZapierNLAListActions",
"ZapierNLARunAction",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/tools/zapier/tool.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
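A hedged way to observe the deprecation behavior these docstrings describe; this assumes both packages are installed, and the exact warning category is an assumption based on the wording above:

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Old path: resolved through the shim's module-level __getattr__.
    from langchain_classic.tools.zapier.tool import ZapierNLARunAction  # noqa: F401

print([str(w.message) for w in caught])  # expect a deprecation notice
```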
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/anthropic.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_num_tokens_anthropic": "langchain_community.utilities.anthropic",
"get_token_ids_anthropic": "langchain_community.utilities.anthropic",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"get_num_tokens_anthropic",
"get_token_ids_anthropic",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/anthropic.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/arcee.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import ArceeWrapper
from langchain_community.utilities.arcee import (
ArceeDocument,
ArceeDocumentAdapter,
ArceeDocumentSource,
ArceeRoute,
DALMFilter,
DALMFilterType,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ArceeRoute": "langchain_community.utilities.arcee",
"DALMFilterType": "langchain_community.utilities.arcee",
"DALMFilter": "langchain_community.utilities.arcee",
"ArceeDocumentSource": "langchain_community.utilities.arcee",
"ArceeDocument": "langchain_community.utilities.arcee",
"ArceeDocumentAdapter": "langchain_community.utilities.arcee",
"ArceeWrapper": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ArceeDocument",
"ArceeDocumentAdapter",
"ArceeDocumentSource",
"ArceeRoute",
"ArceeWrapper",
"DALMFilter",
"DALMFilterType",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/arcee.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/arxiv.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import ArxivAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"ArxivAPIWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ArxivAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/arxiv.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/bibtex.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import BibtexparserWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"BibtexparserWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BibtexparserWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/bibtex.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/brave_search.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import BraveSearchWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"BraveSearchWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BraveSearchWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/brave_search.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/github.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.github import GitHubAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"GitHubAPIWrapper": "langchain_community.utilities.github"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GitHubAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/github.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/max_compute.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import MaxComputeAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"MaxComputeAPIWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MaxComputeAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/max_compute.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/opaqueprompts.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.opaqueprompts import desanitize, sanitize
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"sanitize": "langchain_community.utilities.opaqueprompts",
"desanitize": "langchain_community.utilities.opaqueprompts",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"desanitize",
"sanitize",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/opaqueprompts.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/openapi.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import OpenAPISpec
from langchain_community.utilities.openapi import HTTPVerb
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"HTTPVerb": "langchain_community.utilities.openapi",
"OpenAPISpec": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"HTTPVerb",
"OpenAPISpec",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/openapi.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/outline.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import OutlineAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"OutlineAPIWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OutlineAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/outline.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/pubmed.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import PubMedAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"PubMedAPIWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PubMedAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/pubmed.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/redis.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.redis import (
TokenEscaper,
check_redis_module_exist,
get_client,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TokenEscaper": "langchain_community.utilities.redis",
"check_redis_module_exist": "langchain_community.utilities.redis",
"get_client": "langchain_community.utilities.redis",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TokenEscaper",
"check_redis_module_exist",
"get_client",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/redis.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/requests.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import Requests, RequestsWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Requests": "langchain_community.utilities",
"RequestsWrapper": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Requests",
"RequestsWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/requests.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/serpapi.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import SerpAPIWrapper
from langchain_community.utilities.serpapi import HiddenPrints
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"HiddenPrints": "langchain_community.utilities.serpapi",
"SerpAPIWrapper": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"HiddenPrints",
"SerpAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/serpapi.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/sql_database.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import SQLDatabase
from langchain_community.utilities.sql_database import truncate_word
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"truncate_word": "langchain_community.utilities.sql_database",
"SQLDatabase": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SQLDatabase",
"truncate_word",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/sql_database.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/tensorflow_datasets.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import TensorflowDatasets
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"TensorflowDatasets": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TensorflowDatasets",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/tensorflow_datasets.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/vertexai.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.vertexai import (
create_retry_decorator,
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_retry_decorator": "langchain_community.utilities.vertexai",
"raise_vertex_import_error": "langchain_community.utilities.vertexai",
"init_vertexai": "langchain_community.utilities.vertexai",
"get_client_info": "langchain_community.utilities.vertexai",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_retry_decorator",
"get_client_info",
"init_vertexai",
"raise_vertex_import_error",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/vertexai.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utilities/wikipedia.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import WikipediaAPIWrapper
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"WikipediaAPIWrapper": "langchain_community.utilities"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"WikipediaAPIWrapper",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utilities/wikipedia.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utils/ernie_functions.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utils.ernie_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_ernie_function,
convert_pydantic_to_ernie_tool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FunctionDescription": "langchain_community.utils.ernie_functions",
"ToolDescription": "langchain_community.utils.ernie_functions",
"convert_pydantic_to_ernie_function": "langchain_community.utils.ernie_functions",
"convert_pydantic_to_ernie_tool": "langchain_community.utils.ernie_functions",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FunctionDescription",
"ToolDescription",
"convert_pydantic_to_ernie_function",
"convert_pydantic_to_ernie_tool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utils/ernie_functions.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/utils/openai.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.utils.openai import is_openai_v1
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"is_openai_v1": "langchain_community.utils.openai"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"is_openai_v1",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/utils/openai.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/astradb.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import AstraDB
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"AstraDB": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDB",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/astradb.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/cassandra.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Cassandra
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Cassandra": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Cassandra",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/cassandra.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/chroma.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Chroma
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Chroma": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Chroma",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/chroma.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/clarifai.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Clarifai
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Clarifai": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Clarifai",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/clarifai.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/dashvector.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import DashVector
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DashVector": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DashVector",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/dashvector.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/databricks_vector_search.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import DatabricksVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DatabricksVectorSearch": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DatabricksVectorSearch",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/databricks_vector_search.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/deeplake.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import DeepLake
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DeepLake": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DeepLake",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/deeplake.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/dingo.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Dingo
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Dingo": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Dingo",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/dingo.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/docarray/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.docarray.base import DocArrayIndex
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DocArrayIndex": "langchain_community.vectorstores.docarray.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DocArrayIndex",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/docarray/base.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/docarray/in_memory.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import DocArrayInMemorySearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DocArrayInMemorySearch": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DocArrayInMemorySearch",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/docarray/in_memory.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/elasticsearch.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticsearchStore
from langchain_community.vectorstores.elasticsearch import (
ApproxRetrievalStrategy,
BaseRetrievalStrategy,
ExactRetrievalStrategy,
SparseRetrievalStrategy,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ApproxRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ExactRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"SparseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ElasticsearchStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ApproxRetrievalStrategy",
"BaseRetrievalStrategy",
"ElasticsearchStore",
"ExactRetrievalStrategy",
"SparseRetrievalStrategy",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/elasticsearch.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/llm_rails.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import LLMRails
from langchain_community.vectorstores.llm_rails import LLMRailsRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LLMRails": "langchain_community.vectorstores",
"LLMRailsRetriever": "langchain_community.vectorstores.llm_rails",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LLMRails",
"LLMRailsRetriever",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/llm_rails.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/milvus.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Milvus
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Milvus": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Milvus",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/milvus.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/mongodb_atlas.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"MongoDBAtlasVectorSearch": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MongoDBAtlasVectorSearch",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/mongodb_atlas.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/myscale.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import MyScale, MyScaleSettings
from langchain_community.vectorstores.myscale import MyScaleWithoutJSON
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MyScaleSettings": "langchain_community.vectorstores",
"MyScale": "langchain_community.vectorstores",
"MyScaleWithoutJSON": "langchain_community.vectorstores.myscale",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MyScale",
"MyScaleSettings",
"MyScaleWithoutJSON",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/myscale.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/pgvector.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import PGVector
from langchain_community.vectorstores.pgvector import DistanceStrategy
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DistanceStrategy": "langchain_community.vectorstores.pgvector",
"PGVector": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DistanceStrategy",
"PGVector",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/pgvector.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/pinecone.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Pinecone
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Pinecone": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Pinecone",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/pinecone.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/qdrant.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Qdrant
from langchain_community.vectorstores.qdrant import QdrantException
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"QdrantException": "langchain_community.vectorstores.qdrant",
"Qdrant": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Qdrant",
"QdrantException",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/qdrant.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/redis/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Redis
from langchain_community.vectorstores.redis.base import (
RedisVectorStoreRetriever,
check_index_exists,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"check_index_exists": "langchain_community.vectorstores.redis.base",
"Redis": "langchain_community.vectorstores",
"RedisVectorStoreRetriever": "langchain_community.vectorstores.redis.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Redis",
"RedisVectorStoreRetriever",
"check_index_exists",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/redis/base.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/redis/schema.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.redis.schema import (
FlatVectorField,
HNSWVectorField,
NumericFieldSchema,
RedisDistanceMetric,
RedisField,
RedisModel,
RedisVectorField,
TagFieldSchema,
TextFieldSchema,
read_schema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisDistanceMetric": "langchain_community.vectorstores.redis.schema",
"RedisField": "langchain_community.vectorstores.redis.schema",
"TextFieldSchema": "langchain_community.vectorstores.redis.schema",
"TagFieldSchema": "langchain_community.vectorstores.redis.schema",
"NumericFieldSchema": "langchain_community.vectorstores.redis.schema",
"RedisVectorField": "langchain_community.vectorstores.redis.schema",
"FlatVectorField": "langchain_community.vectorstores.redis.schema",
"HNSWVectorField": "langchain_community.vectorstores.redis.schema",
"RedisModel": "langchain_community.vectorstores.redis.schema",
"read_schema": "langchain_community.vectorstores.redis.schema",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlatVectorField",
"HNSWVectorField",
"NumericFieldSchema",
"RedisDistanceMetric",
"RedisField",
"RedisModel",
"RedisVectorField",
"TagFieldSchema",
"TextFieldSchema",
"read_schema",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/redis/schema.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/rocksetdb.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Rockset
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Rockset": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Rockset",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/rocksetdb.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/singlestoredb.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import SingleStoreDB
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SingleStoreDB": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SingleStoreDB",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/singlestoredb.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/supabase.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import SupabaseVectorStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SupabaseVectorStore": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SupabaseVectorStore",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/supabase.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/tencentvectordb.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import (
ConnectionParams,
IndexParams,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ConnectionParams": "langchain_community.vectorstores.tencentvectordb",
"IndexParams": "langchain_community.vectorstores.tencentvectordb",
"TencentVectorDB": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ConnectionParams",
"IndexParams",
"TencentVectorDB",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/tencentvectordb.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/timescalevector.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import TimescaleVector
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"TimescaleVector": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TimescaleVector",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/timescalevector.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/utils.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.utils import (
DistanceStrategy,
filter_complex_metadata,
maximal_marginal_relevance,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DistanceStrategy": "langchain_community.vectorstores.utils",
"maximal_marginal_relevance": "langchain_community.vectorstores.utils",
"filter_complex_metadata": "langchain_community.vectorstores.utils",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DistanceStrategy",
"filter_complex_metadata",
"maximal_marginal_relevance",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/utils.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/vectara.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Vectara
from langchain_community.vectorstores.vectara import VectaraRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Vectara": "langchain_community.vectorstores",
"VectaraRetriever": "langchain_community.vectorstores.vectara",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Vectara",
"VectaraRetriever",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/vectara.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/weaviate.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Weaviate
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Weaviate": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Weaviate",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/weaviate.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/xata.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.xata import XataVectorStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"XataVectorStore": "langchain_community.vectorstores.xata"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"XataVectorStore",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/xata.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/zep.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ZepVectorStore
from langchain_community.vectorstores.zep import CollectionConfig
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollectionConfig": "langchain_community.vectorstores.zep",
"ZepVectorStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollectionConfig",
"ZepVectorStore",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/zep.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/vectorstores/zilliz.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Zilliz
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Zilliz": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Zilliz",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/vectorstores/zilliz.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py | """Tool call limit middleware for agents."""
from __future__ import annotations
from typing import TYPE_CHECKING, Annotated, Any, Literal
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langgraph.channels.untracked_value import UntrackedValue
from langgraph.typing import ContextT
from typing_extensions import NotRequired, override
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
PrivateStateAttr,
ResponseT,
hook_config,
)
if TYPE_CHECKING:
from langgraph.runtime import Runtime
ExitBehavior = Literal["continue", "error", "end"]
"""How to handle execution when tool call limits are exceeded.
- `'continue'`: Block exceeded tools with error messages, let other tools continue
(default)
- `'error'`: Raise a `ToolCallLimitExceededError` exception
- `'end'`: Stop execution immediately, injecting a `ToolMessage` and an `AIMessage` for
the single tool call that exceeded the limit. Raises `NotImplementedError` if there
are other pending tool calls (due to parallel tool calling).
"""
class ToolCallLimitState(AgentState[ResponseT]):
"""State schema for `ToolCallLimitMiddleware`.
Extends `AgentState` with tool call tracking fields.
The count fields are dictionaries mapping tool names to execution counts. This
allows multiple middleware instances to track different tools independently. The
special key `'__all__'` is used for tracking all tool calls globally.
Type Parameters:
ResponseT: The type of the structured response. Defaults to `Any`.
"""
thread_tool_call_count: NotRequired[Annotated[dict[str, int], PrivateStateAttr]]
run_tool_call_count: NotRequired[Annotated[dict[str, int], UntrackedValue, PrivateStateAttr]]
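# Illustrative state snapshot (hypothetical values): one instance scoped to the
# "search" tool and another tracking all tools keep independent entries, e.g.
#   {"thread_tool_call_count": {"search": 3, "__all__": 7},  # survives runs
#    "run_tool_call_count": {"search": 1, "__all__": 2}}     # reset per run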
def _build_tool_message_content(tool_name: str | None) -> str:
"""Build the error message content for `ToolMessage` when limit is exceeded.
This message is sent to the model, so it should not reference thread/run concepts
that the model has no notion of.
Args:
tool_name: Tool name being limited (if specific tool), or `None` for all tools.
Returns:
A concise message instructing the model not to call the tool again.
"""
# Always instruct the model not to call again, regardless of which limit was hit
if tool_name:
return f"Tool call limit exceeded. Do not call '{tool_name}' again."
return "Tool call limit exceeded. Do not make additional tool calls."
def _build_final_ai_message_content(
thread_count: int,
run_count: int,
thread_limit: int | None,
run_limit: int | None,
tool_name: str | None,
) -> str:
"""Build the final AI message content for `'end'` behavior.
This message is displayed to the user, so it should include detailed information
about which limits were exceeded.
Args:
thread_count: Current thread tool call count.
run_count: Current run tool call count.
thread_limit: Thread tool call limit (if set).
run_limit: Run tool call limit (if set).
tool_name: Tool name being limited (if specific tool), or `None` for all tools.
Returns:
A formatted message describing which limits were exceeded.
"""
tool_desc = f"'{tool_name}' tool" if tool_name else "Tool"
exceeded_limits = []
if thread_limit is not None and thread_count > thread_limit:
exceeded_limits.append(f"thread limit exceeded ({thread_count}/{thread_limit} calls)")
if run_limit is not None and run_count > run_limit:
exceeded_limits.append(f"run limit exceeded ({run_count}/{run_limit} calls)")
limits_text = " and ".join(exceeded_limits)
return f"{tool_desc} call limit reached: {limits_text}."
class ToolCallLimitExceededError(Exception):
"""Exception raised when tool call limits are exceeded.
This exception is raised when the configured exit behavior is `'error'` and either
the thread or run tool call limit has been exceeded.
"""
def __init__(
self,
thread_count: int,
run_count: int,
thread_limit: int | None,
run_limit: int | None,
tool_name: str | None = None,
) -> None:
"""Initialize the exception with call count information.
Args:
thread_count: Current thread tool call count.
run_count: Current run tool call count.
thread_limit: Thread tool call limit (if set).
run_limit: Run tool call limit (if set).
tool_name: Tool name being limited (if specific tool), or None for all tools.
"""
self.thread_count = thread_count
self.run_count = run_count
self.thread_limit = thread_limit
self.run_limit = run_limit
self.tool_name = tool_name
msg = _build_final_ai_message_content(
thread_count, run_count, thread_limit, run_limit, tool_name
)
super().__init__(msg)
class ToolCallLimitMiddleware(AgentMiddleware[ToolCallLimitState[ResponseT], ContextT, ResponseT]):
"""Track tool call counts and enforces limits during agent execution.
This middleware monitors the number of tool calls made and can terminate or
restrict execution when limits are exceeded. It supports both thread-level
(persistent across runs) and run-level (per invocation) call counting.
Configuration:
- `exit_behavior`: How to handle when limits are exceeded
- `'continue'`: Block exceeded tools, let execution continue (default)
- `'error'`: Raise an exception
        - `'end'`: Stop immediately with a `ToolMessage` + AI message for the single
          tool call that exceeded the limit (raises `NotImplementedError` if there
          are other pending tool calls due to parallel tool calling)
Examples:
!!! example "Continue execution with blocked tools (default)"
```python
from langchain.agents.middleware.tool_call_limit import ToolCallLimitMiddleware
from langchain.agents import create_agent
# Block exceeded tools but let other tools and model continue
limiter = ToolCallLimitMiddleware(
thread_limit=20,
run_limit=10,
exit_behavior="continue", # default
)
agent = create_agent("openai:gpt-4o", middleware=[limiter])
```
!!! example "Stop immediately when limit exceeded"
```python
# End execution immediately with an AI message
limiter = ToolCallLimitMiddleware(run_limit=5, exit_behavior="end")
agent = create_agent("openai:gpt-4o", middleware=[limiter])
```
!!! example "Raise exception on limit"
```python
            from langchain_core.messages import HumanMessage

            # Strict limit with exception handling
limiter = ToolCallLimitMiddleware(
tool_name="search", thread_limit=5, exit_behavior="error"
)
agent = create_agent("openai:gpt-4o", middleware=[limiter])
try:
                result = agent.invoke({"messages": [HumanMessage("Task")]})
except ToolCallLimitExceededError as e:
print(f"Search limit exceeded: {e}")
```
"""
state_schema = ToolCallLimitState # type: ignore[assignment]
def __init__(
self,
*,
tool_name: str | None = None,
thread_limit: int | None = None,
run_limit: int | None = None,
exit_behavior: ExitBehavior = "continue",
) -> None:
"""Initialize the tool call limit middleware.
Args:
tool_name: Name of the specific tool to limit. If `None`, limits apply
to all tools.
thread_limit: Maximum number of tool calls allowed per thread.
`None` means no limit.
run_limit: Maximum number of tool calls allowed per run.
`None` means no limit.
exit_behavior: How to handle when limits are exceeded.
- `'continue'`: Block exceeded tools with error messages, let other
tools continue. Model decides when to end.
- `'error'`: Raise a `ToolCallLimitExceededError` exception
                - `'end'`: Stop execution immediately with a `ToolMessage` + AI message
                  for the single tool call that exceeded the limit. Raises
                  `NotImplementedError` if there are pending parallel calls to
                  other tools.
Raises:
ValueError: If both limits are `None`, if `exit_behavior` is invalid,
or if `run_limit` exceeds `thread_limit`.
"""
super().__init__()
if thread_limit is None and run_limit is None:
msg = "At least one limit must be specified (thread_limit or run_limit)"
raise ValueError(msg)
valid_behaviors = ("continue", "error", "end")
if exit_behavior not in valid_behaviors:
msg = f"Invalid exit_behavior: {exit_behavior!r}. Must be one of {valid_behaviors}"
raise ValueError(msg)
if thread_limit is not None and run_limit is not None and run_limit > thread_limit:
msg = (
f"run_limit ({run_limit}) cannot exceed thread_limit ({thread_limit}). "
"The run limit should be less than or equal to the thread limit."
)
raise ValueError(msg)
self.tool_name = tool_name
self.thread_limit = thread_limit
self.run_limit = run_limit
self.exit_behavior = exit_behavior
@property
def name(self) -> str:
"""The name of the middleware instance.
Includes the tool name if specified to allow multiple instances
of this middleware with different tool names.
"""
base_name = self.__class__.__name__
if self.tool_name:
return f"{base_name}[{self.tool_name}]"
return base_name
def _would_exceed_limit(self, thread_count: int, run_count: int) -> bool:
"""Check if incrementing the counts would exceed any configured limit.
Args:
thread_count: Current thread call count.
run_count: Current run call count.
Returns:
True if either limit would be exceeded by one more call.
"""
return (self.thread_limit is not None and thread_count + 1 > self.thread_limit) or (
self.run_limit is not None and run_count + 1 > self.run_limit
)
def _matches_tool_filter(self, tool_call: ToolCall) -> bool:
"""Check if a tool call matches this middleware's tool filter.
Args:
tool_call: The tool call to check.
Returns:
True if this middleware should track this tool call.
"""
return self.tool_name is None or tool_call["name"] == self.tool_name
def _separate_tool_calls(
self, tool_calls: list[ToolCall], thread_count: int, run_count: int
) -> tuple[list[ToolCall], list[ToolCall], int, int]:
"""Separate tool calls into allowed and blocked based on limits.
Args:
tool_calls: List of tool calls to evaluate.
thread_count: Current thread call count.
run_count: Current run call count.
Returns:
Tuple of `(allowed_calls, blocked_calls, final_thread_count,
final_run_count)`.
"""
allowed_calls: list[ToolCall] = []
blocked_calls: list[ToolCall] = []
temp_thread_count = thread_count
temp_run_count = run_count
for tool_call in tool_calls:
if not self._matches_tool_filter(tool_call):
continue
if self._would_exceed_limit(temp_thread_count, temp_run_count):
blocked_calls.append(tool_call)
else:
allowed_calls.append(tool_call)
temp_thread_count += 1
temp_run_count += 1
return allowed_calls, blocked_calls, temp_thread_count, temp_run_count
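    # Illustrative walk-through (hypothetical numbers): with run_limit=2, no
    # thread limit, and counts starting at 0, three matching calls [c1, c2, c3]
    # split as c1 allowed (count 1), c2 allowed (count 2), c3 blocked, so the
    # method returns ([c1, c2], [c3], 2, 2).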
@hook_config(can_jump_to=["end"])
@override
def after_model(
self,
state: ToolCallLimitState[ResponseT],
runtime: Runtime[ContextT],
) -> dict[str, Any] | None:
"""Increment tool call counts after a model call and check limits.
Args:
state: The current agent state.
runtime: The langgraph runtime.
Returns:
State updates with incremented tool call counts. If limits are exceeded
and exit_behavior is `'end'`, also includes a jump to end with a
`ToolMessage` and AI message for the single exceeded tool call.
Raises:
ToolCallLimitExceededError: If limits are exceeded and `exit_behavior`
is `'error'`.
            NotImplementedError: If limits are exceeded, `exit_behavior` is `'end'`,
                and there are pending calls to other tools.
"""
# Get the last AIMessage to check for tool calls
messages = state.get("messages", [])
if not messages:
return None
# Find the last AIMessage
last_ai_message = None
for message in reversed(messages):
if isinstance(message, AIMessage):
last_ai_message = message
break
if not last_ai_message or not last_ai_message.tool_calls:
return None
# Get the count key for this middleware instance
count_key = self.tool_name or "__all__"
# Get current counts
thread_counts = state.get("thread_tool_call_count", {}).copy()
run_counts = state.get("run_tool_call_count", {}).copy()
current_thread_count = thread_counts.get(count_key, 0)
current_run_count = run_counts.get(count_key, 0)
# Separate tool calls into allowed and blocked
allowed_calls, blocked_calls, new_thread_count, new_run_count = self._separate_tool_calls(
last_ai_message.tool_calls, current_thread_count, current_run_count
)
# Update counts to include only allowed calls for thread count
# (blocked calls don't count towards thread-level tracking)
# But run count includes blocked calls since they were attempted in this run
thread_counts[count_key] = new_thread_count
run_counts[count_key] = new_run_count + len(blocked_calls)
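        # e.g. (hypothetical): with run_limit=2, two allowed + one blocked call
        # advance the thread count by 2 (blocked calls excluded) and the run
        # count by 3 (blocked attempts still consume the run budget).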
# If no tool calls are blocked, just update counts
if not blocked_calls:
if allowed_calls:
return {
"thread_tool_call_count": thread_counts,
"run_tool_call_count": run_counts,
}
return None
# Get final counts for building messages
final_thread_count = thread_counts[count_key]
final_run_count = run_counts[count_key]
# Handle different exit behaviors
if self.exit_behavior == "error":
# Use hypothetical thread count to show which limit was exceeded
hypothetical_thread_count = final_thread_count + len(blocked_calls)
raise ToolCallLimitExceededError(
thread_count=hypothetical_thread_count,
run_count=final_run_count,
thread_limit=self.thread_limit,
run_limit=self.run_limit,
tool_name=self.tool_name,
)
# Build tool message content (sent to model - no thread/run details)
tool_msg_content = _build_tool_message_content(self.tool_name)
# Inject artificial error ToolMessages for blocked tool calls
artificial_messages: list[ToolMessage | AIMessage] = [
ToolMessage(
content=tool_msg_content,
tool_call_id=tool_call["id"],
name=tool_call.get("name"),
status="error",
)
for tool_call in blocked_calls
]
if self.exit_behavior == "end":
# Check if there are tool calls to other tools that would continue executing
other_tools = [
tc
for tc in last_ai_message.tool_calls
if self.tool_name is not None and tc["name"] != self.tool_name
]
if other_tools:
tool_names = ", ".join({tc["name"] for tc in other_tools})
msg = (
f"Cannot end execution with other tool calls pending. "
f"Found calls to: {tool_names}. Use 'continue' or 'error' behavior instead."
)
raise NotImplementedError(msg)
# Build final AI message content (displayed to user - includes thread/run details)
# Use hypothetical thread count (what it would have been if call wasn't blocked)
# to show which limit was actually exceeded
hypothetical_thread_count = final_thread_count + len(blocked_calls)
final_msg_content = _build_final_ai_message_content(
hypothetical_thread_count,
final_run_count,
self.thread_limit,
self.run_limit,
self.tool_name,
)
artificial_messages.append(AIMessage(content=final_msg_content))
return {
"thread_tool_call_count": thread_counts,
"run_tool_call_count": run_counts,
"jump_to": "end",
"messages": artificial_messages,
}
# For exit_behavior="continue", return error messages to block exceeded tools
return {
"thread_tool_call_count": thread_counts,
"run_tool_call_count": run_counts,
"messages": artificial_messages,
}
@hook_config(can_jump_to=["end"])
async def aafter_model(
self,
state: ToolCallLimitState[ResponseT],
runtime: Runtime[ContextT],
) -> dict[str, Any] | None:
"""Async increment tool call counts after a model call and check limits.
Args:
state: The current agent state.
runtime: The langgraph runtime.
Returns:
State updates with incremented tool call counts. If limits are exceeded
and exit_behavior is `'end'`, also includes a jump to end with a
`ToolMessage` and AI message for the single exceeded tool call.
Raises:
ToolCallLimitExceededError: If limits are exceeded and `exit_behavior`
is `'error'`.
            NotImplementedError: If limits are exceeded, `exit_behavior` is `'end'`,
                and there are pending calls to other tools.
"""
return self.after_model(state, runtime)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py",
"license": "MIT License",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/anthropic.py | """Derivations of standard content blocks from Anthropic content."""
import json
from collections.abc import Iterator
from typing import Any, cast
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block
for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]
return standard_block
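# Hedged usage sketch, not part of the upstream module: _populate_extras moves
# any field outside `known_fields` under the block's "extras" key. The
# "cache_control" field below is a hypothetical provider extra.
def _demo_populate_extras() -> None:
    raw = {"type": "text", "text": "hi", "cache_control": {"type": "ephemeral"}}
    standard: types.ContentBlock = {"type": "text", "text": "hi"}
    _populate_extras(standard, raw, {"type", "text"})
    assert standard["extras"] == {"cache_control": {"type": "ephemeral"}}  # type: ignore[typeddict-item]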
def _convert_to_v1_from_anthropic_input(
content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
"""Convert Anthropic format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Anthropic format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
Args:
content: List of content blocks to process.
Returns:
Updated list with Anthropic blocks converted to v1 format.
"""
def _iter_blocks() -> Iterator[types.ContentBlock]:
blocks: list[dict[str, Any]] = [
cast("dict[str, Any]", block)
if block.get("type") != "non_standard"
else block["value"] # type: ignore[typeddict-item] # this is only non-standard blocks
for block in content
]
for block in blocks:
block_type = block.get("type")
if (
block_type == "document"
and "source" in block
and "type" in block["source"]
):
if block["source"]["type"] == "base64":
file_block: types.FileContentBlock = {
"type": "file",
"base64": block["source"]["data"],
"mime_type": block["source"]["media_type"],
}
_populate_extras(file_block, block, {"type", "source"})
yield file_block
elif block["source"]["type"] == "url":
file_block = {
"type": "file",
"url": block["source"]["url"],
}
_populate_extras(file_block, block, {"type", "source"})
yield file_block
elif block["source"]["type"] == "file":
file_block = {
"type": "file",
"id": block["source"]["file_id"],
}
_populate_extras(file_block, block, {"type", "source"})
yield file_block
elif block["source"]["type"] == "text":
plain_text_block: types.PlainTextContentBlock = {
"type": "text-plain",
"text": block["source"]["data"],
"mime_type": block.get("media_type", "text/plain"),
}
_populate_extras(plain_text_block, block, {"type", "source"})
yield plain_text_block
else:
yield {"type": "non_standard", "value": block}
elif (
block_type == "image"
and "source" in block
and "type" in block["source"]
):
if block["source"]["type"] == "base64":
image_block: types.ImageContentBlock = {
"type": "image",
"base64": block["source"]["data"],
"mime_type": block["source"]["media_type"],
}
_populate_extras(image_block, block, {"type", "source"})
yield image_block
elif block["source"]["type"] == "url":
image_block = {
"type": "image",
"url": block["source"]["url"],
}
_populate_extras(image_block, block, {"type", "source"})
yield image_block
elif block["source"]["type"] == "file":
image_block = {
"type": "image",
"id": block["source"]["file_id"],
}
_populate_extras(image_block, block, {"type", "source"})
yield image_block
else:
yield {"type": "non_standard", "value": block}
elif block_type in types.KNOWN_BLOCK_TYPES:
yield cast("types.ContentBlock", block)
else:
yield {"type": "non_standard", "value": block}
return list(_iter_blocks())
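# Hedged usage sketch, not part of the upstream module: a non-standard-wrapped
# Anthropic base64 document unpacks into a v1 "file" block. The base64 payload
# is a placeholder, not real document data.
def _demo_convert_anthropic_input() -> None:
    wrapped: list[types.ContentBlock] = [
        {
            "type": "non_standard",
            "value": {
                "type": "document",
                "source": {
                    "type": "base64",
                    "data": "JVBERi0=",
                    "media_type": "application/pdf",
                },
            },
        }
    ]
    (converted,) = _convert_to_v1_from_anthropic_input(wrapped)
    assert converted == {
        "type": "file",
        "base64": "JVBERi0=",
        "mime_type": "application/pdf",
    }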
def _convert_citation_to_v1(citation: dict[str, Any]) -> types.Annotation:
citation_type = citation.get("type")
if citation_type == "web_search_result_location":
url_citation: types.Citation = {
"type": "citation",
"cited_text": citation["cited_text"],
"url": citation["url"],
}
if title := citation.get("title"):
url_citation["title"] = title
known_fields = {"type", "cited_text", "url", "title", "index", "extras"}
for key, value in citation.items():
if key not in known_fields:
if "extras" not in url_citation:
url_citation["extras"] = {}
url_citation["extras"][key] = value
return url_citation
if citation_type in {
"char_location",
"content_block_location",
"page_location",
"search_result_location",
}:
document_citation: types.Citation = {
"type": "citation",
"cited_text": citation["cited_text"],
}
if "document_title" in citation:
document_citation["title"] = citation["document_title"]
elif title := citation.get("title"):
document_citation["title"] = title
known_fields = {
"type",
"cited_text",
"document_title",
"title",
"index",
"extras",
}
for key, value in citation.items():
if key not in known_fields:
if "extras" not in document_citation:
document_citation["extras"] = {}
document_citation["extras"][key] = value
return document_citation
return {
"type": "non_standard_annotation",
"value": citation,
}
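# Hedged usage sketch, not part of the upstream module: an Anthropic
# web_search_result_location citation maps onto a standard Citation. The URL,
# title, and cited text are illustrative placeholders.
def _demo_convert_citation() -> None:
    assert _convert_citation_to_v1(
        {
            "type": "web_search_result_location",
            "cited_text": "Spain won.",
            "url": "https://example.com/result",
            "title": "Euro 2024",
        }
    ) == {
        "type": "citation",
        "cited_text": "Spain won.",
        "url": "https://example.com/result",
        "title": "Euro 2024",
    }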
def _convert_to_v1_from_anthropic(message: AIMessage) -> list[types.ContentBlock]:
"""Convert Anthropic message content to v1 format."""
if isinstance(message.content, str):
content: list[str | dict] = [{"type": "text", "text": message.content}]
else:
content = message.content
def _iter_blocks() -> Iterator[types.ContentBlock]:
for block in content:
if not isinstance(block, dict):
continue
block_type = block.get("type")
if block_type == "text":
if citations := block.get("citations"):
text_block: types.TextContentBlock = {
"type": "text",
"text": block.get("text", ""),
"annotations": [_convert_citation_to_v1(a) for a in citations],
}
else:
text_block = {"type": "text", "text": block["text"]}
if "index" in block:
text_block["index"] = block["index"]
yield text_block
elif block_type == "thinking":
reasoning_block: types.ReasoningContentBlock = {
"type": "reasoning",
"reasoning": block.get("thinking", ""),
}
if "index" in block:
reasoning_block["index"] = block["index"]
known_fields = {"type", "thinking", "index", "extras"}
for key in block:
if key not in known_fields:
if "extras" not in reasoning_block:
reasoning_block["extras"] = {}
reasoning_block["extras"][key] = block[key]
yield reasoning_block
elif block_type == "tool_use":
if (
isinstance(message, AIMessageChunk)
and len(message.tool_call_chunks) == 1
and message.chunk_position != "last"
):
# Isolated chunk
chunk = message.tool_call_chunks[0]
tool_call_chunk = types.ToolCallChunk(
name=chunk.get("name"),
id=chunk.get("id"),
args=chunk.get("args"),
type="tool_call_chunk",
)
if "caller" in block:
tool_call_chunk["extras"] = {"caller": block["caller"]}
index = chunk.get("index")
if index is not None:
tool_call_chunk["index"] = index
yield tool_call_chunk
else:
tool_call_block: types.ToolCall | None = None
# Non-streaming or gathered chunk
if len(message.tool_calls) == 1:
tool_call_block = {
"type": "tool_call",
"name": message.tool_calls[0]["name"],
"args": message.tool_calls[0]["args"],
"id": message.tool_calls[0].get("id"),
}
elif call_id := block.get("id"):
for tc in message.tool_calls:
if tc.get("id") == call_id:
tool_call_block = {
"type": "tool_call",
"name": tc["name"],
"args": tc["args"],
"id": tc.get("id"),
}
break
if not tool_call_block:
tool_call_block = {
"type": "tool_call",
"name": block.get("name", ""),
"args": block.get("input", {}),
"id": block.get("id", ""),
}
if "index" in block:
tool_call_block["index"] = block["index"]
if "caller" in block:
if "extras" not in tool_call_block:
tool_call_block["extras"] = {}
tool_call_block["extras"]["caller"] = block["caller"]
yield tool_call_block
elif block_type == "input_json_delta" and isinstance(
message, AIMessageChunk
):
if len(message.tool_call_chunks) == 1:
chunk = message.tool_call_chunks[0]
tool_call_chunk = types.ToolCallChunk(
name=chunk.get("name"),
id=chunk.get("id"),
args=chunk.get("args"),
type="tool_call_chunk",
)
index = chunk.get("index")
if index is not None:
tool_call_chunk["index"] = index
yield tool_call_chunk
else:
server_tool_call_chunk: types.ServerToolCallChunk = {
"type": "server_tool_call_chunk",
"args": block.get("partial_json", ""),
}
if "index" in block:
server_tool_call_chunk["index"] = block["index"]
yield server_tool_call_chunk
elif block_type == "server_tool_use":
if block.get("name") == "code_execution":
server_tool_use_name = "code_interpreter"
else:
server_tool_use_name = block.get("name", "")
if (
isinstance(message, AIMessageChunk)
and block.get("input") == {}
and "partial_json" not in block
and message.chunk_position != "last"
):
# First chunk in a stream
server_tool_call_chunk = {
"type": "server_tool_call_chunk",
"name": server_tool_use_name,
"args": "",
"id": block.get("id", ""),
}
if "index" in block:
server_tool_call_chunk["index"] = block["index"]
known_fields = {"type", "name", "input", "id", "index"}
_populate_extras(server_tool_call_chunk, block, known_fields)
yield server_tool_call_chunk
else:
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": server_tool_use_name,
"args": block.get("input", {}),
"id": block.get("id", ""),
}
if block.get("input") == {} and "partial_json" in block:
try:
input_ = json.loads(block["partial_json"])
if isinstance(input_, dict):
server_tool_call["args"] = input_
except json.JSONDecodeError:
pass
if "index" in block:
server_tool_call["index"] = block["index"]
known_fields = {
"type",
"name",
"input",
"partial_json",
"id",
"index",
}
_populate_extras(server_tool_call, block, known_fields)
yield server_tool_call
elif block_type == "mcp_tool_use":
if (
isinstance(message, AIMessageChunk)
and block.get("input") == {}
and "partial_json" not in block
and message.chunk_position != "last"
):
# First chunk in a stream
server_tool_call_chunk = {
"type": "server_tool_call_chunk",
"name": "remote_mcp",
"args": "",
"id": block.get("id", ""),
}
if "name" in block:
server_tool_call_chunk["extras"] = {"tool_name": block["name"]}
known_fields = {"type", "name", "input", "id", "index"}
_populate_extras(server_tool_call_chunk, block, known_fields)
if "index" in block:
server_tool_call_chunk["index"] = block["index"]
yield server_tool_call_chunk
else:
server_tool_call = {
"type": "server_tool_call",
"name": "remote_mcp",
"args": block.get("input", {}),
"id": block.get("id", ""),
}
if block.get("input") == {} and "partial_json" in block:
try:
input_ = json.loads(block["partial_json"])
if isinstance(input_, dict):
server_tool_call["args"] = input_
except json.JSONDecodeError:
pass
if "name" in block:
server_tool_call["extras"] = {"tool_name": block["name"]}
known_fields = {
"type",
"name",
"input",
"partial_json",
"id",
"index",
}
_populate_extras(server_tool_call, block, known_fields)
if "index" in block:
server_tool_call["index"] = block["index"]
yield server_tool_call
elif block_type and block_type.endswith("_tool_result"):
server_tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": block.get("tool_use_id", ""),
"status": "success",
"extras": {"block_type": block_type},
}
if output := block.get("content", []):
server_tool_result["output"] = output
if isinstance(output, dict) and output.get(
"error_code" # web_search, code_interpreter
):
server_tool_result["status"] = "error"
if block.get("is_error"): # mcp_tool_result
server_tool_result["status"] = "error"
if "index" in block:
server_tool_result["index"] = block["index"]
known_fields = {"type", "tool_use_id", "content", "is_error", "index"}
_populate_extras(server_tool_result, block, known_fields)
yield server_tool_result
else:
new_block: types.NonStandardContentBlock = {
"type": "non_standard",
"value": block,
}
if "index" in new_block["value"]:
new_block["index"] = new_block["value"].pop("index")
yield new_block
return list(_iter_blocks())
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with Anthropic content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_anthropic(message)
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with Anthropic content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_anthropic(message)
def _register_anthropic_translator() -> None:
"""Register the Anthropic translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("anthropic", translate_content, translate_content_chunk)
_register_anthropic_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/anthropic.py",
"license": "MIT License",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/bedrock.py | """Derivations of standard content blocks from Bedrock content."""
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.block_translators.anthropic import (
_convert_to_v1_from_anthropic,
)
def _convert_to_v1_from_bedrock(message: AIMessage) -> list[types.ContentBlock]:
"""Convert bedrock message content to v1 format."""
out = _convert_to_v1_from_anthropic(message)
content_tool_call_ids = {
block.get("id")
for block in out
if isinstance(block, dict) and block.get("type") == "tool_call"
}
for tool_call in message.tool_calls:
if (id_ := tool_call.get("id")) and id_ not in content_tool_call_ids:
tool_call_block: types.ToolCall = {
"type": "tool_call",
"id": id_,
"name": tool_call["name"],
"args": tool_call["args"],
}
if "index" in tool_call:
tool_call_block["index"] = tool_call["index"] # type: ignore[typeddict-item]
if "extras" in tool_call:
tool_call_block["extras"] = tool_call["extras"] # type: ignore[typeddict-item]
out.append(tool_call_block)
return out
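# Hedged usage sketch, not part of the upstream module: a tool call present on
# `message.tool_calls` but missing from the content is backfilled as a v1
# "tool_call" block. The call id and tool name are placeholders.
def _demo_convert_bedrock() -> None:
    message = AIMessage(
        content=[{"type": "text", "text": "calling a tool"}],
        tool_calls=[
            {"type": "tool_call", "id": "call_1", "name": "get_weather", "args": {}}
        ],
    )
    blocks = _convert_to_v1_from_bedrock(message)
    assert {
        "type": "tool_call",
        "id": "call_1",
        "name": "get_weather",
        "args": {},
    } in blocks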
def _convert_to_v1_from_bedrock_chunk(
message: AIMessageChunk,
) -> list[types.ContentBlock]:
"""Convert bedrock message chunk content to v1 format."""
if (
message.content == ""
and not message.additional_kwargs
and not message.tool_calls
):
# Bedrock outputs multiple chunks containing response metadata
return []
out = _convert_to_v1_from_anthropic(message)
if (
message.tool_call_chunks
and not message.content
and message.chunk_position != "last" # keep tool_calls if aggregated
):
for tool_call_chunk in message.tool_call_chunks:
tc: types.ToolCallChunk = {
"type": "tool_call_chunk",
"id": tool_call_chunk.get("id"),
"name": tool_call_chunk.get("name"),
"args": tool_call_chunk.get("args"),
}
if (idx := tool_call_chunk.get("index")) is not None:
tc["index"] = idx
out.append(tc)
return out
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with Bedrock content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
if "claude" not in message.response_metadata.get("model_name", "").lower():
raise NotImplementedError # fall back to best-effort parsing
return _convert_to_v1_from_bedrock(message)
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with Bedrock content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
# TODO: add model_name to all Bedrock chunks and update core merging logic
# to not append during aggregation. Then raise NotImplementedError here if
# not an Anthropic model to fall back to best-effort parsing.
return _convert_to_v1_from_bedrock_chunk(message)
def _register_bedrock_translator() -> None:
"""Register the bedrock translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("bedrock", translate_content, translate_content_chunk)
_register_bedrock_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/bedrock.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/bedrock_converse.py | """Derivations of standard content blocks from Amazon (Bedrock Converse) content."""
import base64
from collections.abc import Iterator
from typing import Any, cast
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
def _bytes_to_b64_str(bytes_: bytes) -> str:
return base64.b64encode(bytes_).decode("utf-8")
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block
for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]
return standard_block
def _convert_to_v1_from_converse_input(
content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
"""Convert Bedrock Converse format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be Converse format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
Args:
content: List of content blocks to process.
Returns:
Updated list with Converse blocks converted to v1 format.
"""
def _iter_blocks() -> Iterator[types.ContentBlock]:
blocks: list[dict[str, Any]] = [
cast("dict[str, Any]", block)
if block.get("type") != "non_standard"
else block["value"] # type: ignore[typeddict-item] # this is only non-standard blocks
for block in content
]
for block in blocks:
num_keys = len(block)
if num_keys == 1 and (text := block.get("text")):
yield {"type": "text", "text": text}
elif (
num_keys == 1
and (document := block.get("document"))
and isinstance(document, dict)
and "format" in document
):
if document.get("format") == "pdf":
if "bytes" in document.get("source", {}):
file_block: types.FileContentBlock = {
"type": "file",
"base64": _bytes_to_b64_str(document["source"]["bytes"]),
"mime_type": "application/pdf",
}
_populate_extras(file_block, document, {"format", "source"})
yield file_block
else:
yield {"type": "non_standard", "value": block}
elif document["format"] == "txt":
if "text" in document.get("source", {}):
plain_text_block: types.PlainTextContentBlock = {
"type": "text-plain",
"text": document["source"]["text"],
"mime_type": "text/plain",
}
_populate_extras(
plain_text_block, document, {"format", "source"}
)
yield plain_text_block
else:
yield {"type": "non_standard", "value": block}
else:
yield {"type": "non_standard", "value": block}
elif (
num_keys == 1
and (image := block.get("image"))
and isinstance(image, dict)
and "format" in image
):
if "bytes" in image.get("source", {}):
image_block: types.ImageContentBlock = {
"type": "image",
"base64": _bytes_to_b64_str(image["source"]["bytes"]),
"mime_type": f"image/{image['format']}",
}
_populate_extras(image_block, image, {"format", "source"})
yield image_block
else:
yield {"type": "non_standard", "value": block}
elif block.get("type") in types.KNOWN_BLOCK_TYPES:
yield cast("types.ContentBlock", block)
else:
yield {"type": "non_standard", "value": block}
return list(_iter_blocks())
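# Hedged usage sketch, not part of the upstream module: a Converse "txt"
# document converts to a v1 "text-plain" block. The text is a placeholder.
def _demo_convert_converse_input() -> None:
    (converted,) = _convert_to_v1_from_converse_input(
        [{"document": {"format": "txt", "source": {"text": "hello"}}}]  # type: ignore[list-item]
    )
    assert converted == {
        "type": "text-plain",
        "text": "hello",
        "mime_type": "text/plain",
    }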
def _convert_citation_to_v1(citation: dict[str, Any]) -> types.Annotation:
standard_citation: types.Citation = {"type": "citation"}
if "title" in citation:
standard_citation["title"] = citation["title"]
if (
(source_content := citation.get("source_content"))
and isinstance(source_content, list)
and all(isinstance(item, dict) for item in source_content)
):
standard_citation["cited_text"] = "".join(
item.get("text", "") for item in source_content
)
known_fields = {"type", "source_content", "title", "index", "extras"}
for key, value in citation.items():
if key not in known_fields:
if "extras" not in standard_citation:
standard_citation["extras"] = {}
standard_citation["extras"][key] = value
return standard_citation
def _convert_to_v1_from_converse(message: AIMessage) -> list[types.ContentBlock]:
"""Convert Bedrock Converse message content to v1 format."""
if (
message.content == ""
and not message.additional_kwargs
and not message.tool_calls
):
# Converse outputs multiple chunks containing response metadata
return []
    if isinstance(message.content, str):
        content: list[str | dict] = [{"type": "text", "text": message.content}]
    else:
        content = message.content
def _iter_blocks() -> Iterator[types.ContentBlock]:
        for block in content:
if not isinstance(block, dict):
continue
block_type = block.get("type")
if block_type == "text":
if citations := block.get("citations"):
text_block: types.TextContentBlock = {
"type": "text",
"text": block.get("text", ""),
"annotations": [_convert_citation_to_v1(a) for a in citations],
}
else:
text_block = {"type": "text", "text": block["text"]}
if "index" in block:
text_block["index"] = block["index"]
yield text_block
elif block_type == "reasoning_content":
reasoning_block: types.ReasoningContentBlock = {"type": "reasoning"}
if reasoning_content := block.get("reasoning_content"):
if reasoning := reasoning_content.get("text"):
reasoning_block["reasoning"] = reasoning
if signature := reasoning_content.get("signature"):
if "extras" not in reasoning_block:
reasoning_block["extras"] = {}
reasoning_block["extras"]["signature"] = signature
if "index" in block:
reasoning_block["index"] = block["index"]
known_fields = {"type", "reasoning_content", "index", "extras"}
for key in block:
if key not in known_fields:
if "extras" not in reasoning_block:
reasoning_block["extras"] = {}
reasoning_block["extras"][key] = block[key]
yield reasoning_block
elif block_type == "tool_use":
if (
isinstance(message, AIMessageChunk)
and len(message.tool_call_chunks) == 1
and message.chunk_position != "last"
):
# Isolated chunk
chunk = message.tool_call_chunks[0]
tool_call_chunk = types.ToolCallChunk(
name=chunk.get("name"),
id=chunk.get("id"),
args=chunk.get("args"),
type="tool_call_chunk",
)
index = chunk.get("index")
if index is not None:
tool_call_chunk["index"] = index
yield tool_call_chunk
else:
tool_call_block: types.ToolCall | None = None
# Non-streaming or gathered chunk
if len(message.tool_calls) == 1:
tool_call_block = {
"type": "tool_call",
"name": message.tool_calls[0]["name"],
"args": message.tool_calls[0]["args"],
"id": message.tool_calls[0].get("id"),
}
elif call_id := block.get("id"):
for tc in message.tool_calls:
if tc.get("id") == call_id:
tool_call_block = {
"type": "tool_call",
"name": tc["name"],
"args": tc["args"],
"id": tc.get("id"),
}
break
if not tool_call_block:
tool_call_block = {
"type": "tool_call",
"name": block.get("name", ""),
"args": block.get("input", {}),
"id": block.get("id", ""),
}
if "index" in block:
tool_call_block["index"] = block["index"]
yield tool_call_block
elif (
block_type == "input_json_delta"
and isinstance(message, AIMessageChunk)
and len(message.tool_call_chunks) == 1
):
chunk = message.tool_call_chunks[0]
tool_call_chunk = types.ToolCallChunk(
name=chunk.get("name"),
id=chunk.get("id"),
args=chunk.get("args"),
type="tool_call_chunk",
)
index = chunk.get("index")
if index is not None:
tool_call_chunk["index"] = index
yield tool_call_chunk
else:
new_block: types.NonStandardContentBlock = {
"type": "non_standard",
"value": block,
}
if "index" in new_block["value"]:
new_block["index"] = new_block["value"].pop("index")
yield new_block
return list(_iter_blocks())
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with Bedrock Converse content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_converse(message)
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a chunk with Bedrock Converse content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_converse(message)
def _register_bedrock_converse_translator() -> None:
"""Register the Bedrock Converse translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("bedrock_converse", translate_content, translate_content_chunk)
_register_bedrock_converse_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/bedrock_converse.py",
"license": "MIT License",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/google_genai.py | """Derivations of standard content blocks from Google (GenAI) content."""
import base64
import re
from collections.abc import Iterator
from typing import Any, cast
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.content import Citation, create_citation
try:
import filetype # type: ignore[import-not-found]
_HAS_FILETYPE = True
except ImportError:
_HAS_FILETYPE = False
def _bytes_to_b64_str(bytes_: bytes) -> str:
"""Convert bytes to base64 encoded string."""
return base64.b64encode(bytes_).decode("utf-8")
def translate_grounding_metadata_to_citations(
grounding_metadata: dict[str, Any],
) -> list[Citation]:
"""Translate Google AI grounding metadata to LangChain Citations.
Args:
grounding_metadata: Google AI grounding metadata containing web search
queries, grounding chunks, and grounding supports.
Returns:
List of Citation content blocks derived from the grounding metadata.
Example:
>>> metadata = {
... "web_search_queries": ["UEFA Euro 2024 winner"],
... "grounding_chunks": [
... {
... "web": {
... "uri": "https://uefa.com/euro2024",
... "title": "UEFA Euro 2024 Results",
... }
... }
... ],
... "grounding_supports": [
... {
... "segment": {
... "start_index": 0,
... "end_index": 47,
... "text": "Spain won the UEFA Euro 2024 championship",
... },
... "grounding_chunk_indices": [0],
... }
... ],
... }
>>> citations = translate_grounding_metadata_to_citations(metadata)
>>> len(citations)
1
>>> citations[0]["url"]
'https://uefa.com/euro2024'
"""
if not grounding_metadata:
return []
grounding_chunks = grounding_metadata.get("grounding_chunks", [])
grounding_supports = grounding_metadata.get("grounding_supports", [])
web_search_queries = grounding_metadata.get("web_search_queries", [])
citations: list[Citation] = []
for support in grounding_supports:
segment = support.get("segment", {})
chunk_indices = support.get("grounding_chunk_indices", [])
start_index = segment.get("start_index")
end_index = segment.get("end_index")
cited_text = segment.get("text")
# Create a citation for each referenced chunk
for chunk_index in chunk_indices:
if chunk_index < len(grounding_chunks):
chunk = grounding_chunks[chunk_index]
# Handle web and maps grounding
web_info = chunk.get("web") or {}
maps_info = chunk.get("maps") or {}
# Extract citation info depending on source
url = maps_info.get("uri") or web_info.get("uri")
title = maps_info.get("title") or web_info.get("title")
# Note: confidence_scores is a legacy field from Gemini 2.0 and earlier
# that indicated confidence (0.0-1.0) for each grounding chunk.
#
# In Gemini 2.5+, this field is always None/empty and should be ignored.
extras_metadata = {
"web_search_queries": web_search_queries,
"grounding_chunk_index": chunk_index,
"confidence_scores": support.get("confidence_scores") or [],
}
# Add maps-specific metadata if present
if maps_info.get("placeId"):
extras_metadata["place_id"] = maps_info["placeId"]
citation = create_citation(
url=url,
title=title,
start_index=start_index,
end_index=end_index,
cited_text=cited_text,
google_ai_metadata=extras_metadata,
)
citations.append(citation)
return citations
def _convert_to_v1_from_genai_input(
content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
"""Convert Google GenAI format blocks to v1 format.
Called when message isn't an `AIMessage` or `model_provider` isn't set on
`response_metadata`.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be GenAI format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
Args:
content: List of content blocks to process.
Returns:
Updated list with GenAI blocks converted to v1 format.
"""
def _iter_blocks() -> Iterator[types.ContentBlock]:
blocks: list[dict[str, Any]] = [
cast("dict[str, Any]", block)
if block.get("type") != "non_standard"
else block["value"] # type: ignore[typeddict-item] # this is only non-standard blocks
for block in content
]
for block in blocks:
num_keys = len(block)
block_type = block.get("type")
if num_keys == 1 and (text := block.get("text")):
# This is probably a TextContentBlock
yield {"type": "text", "text": text}
elif (
num_keys == 1
and (document := block.get("document"))
and isinstance(document, dict)
and "format" in document
):
# Handle document format conversion
doc_format = document.get("format")
source = document.get("source", {})
if doc_format == "pdf" and "bytes" in source:
# PDF document with byte data
file_block: types.FileContentBlock = {
"type": "file",
"base64": source["bytes"]
if isinstance(source["bytes"], str)
else _bytes_to_b64_str(source["bytes"]),
"mime_type": "application/pdf",
}
# Preserve extra fields
extras = {
key: value
for key, value in document.items()
if key not in {"format", "source"}
}
if extras:
file_block["extras"] = extras
yield file_block
elif doc_format == "txt" and "text" in source:
# Text document
plain_text_block: types.PlainTextContentBlock = {
"type": "text-plain",
"text": source["text"],
"mime_type": "text/plain",
}
# Preserve extra fields
extras = {
key: value
for key, value in document.items()
if key not in {"format", "source"}
}
if extras:
plain_text_block["extras"] = extras
yield plain_text_block
else:
# Unknown document format
yield {"type": "non_standard", "value": block}
elif (
num_keys == 1
and (image := block.get("image"))
and isinstance(image, dict)
and "format" in image
):
# Handle image format conversion
img_format = image.get("format")
source = image.get("source", {})
if "bytes" in source:
# Image with byte data
image_block: types.ImageContentBlock = {
"type": "image",
"base64": source["bytes"]
if isinstance(source["bytes"], str)
else _bytes_to_b64_str(source["bytes"]),
"mime_type": f"image/{img_format}",
}
                # Preserve extra fields
                extras = {
                    key: value
                    for key, value in image.items()
                    if key not in {"format", "source"}
                }
if extras:
image_block["extras"] = extras
yield image_block
else:
# Image without byte data
yield {"type": "non_standard", "value": block}
elif block_type == "file_data" and "file_uri" in block:
# Handle FileData URI-based content
uri_file_block: types.FileContentBlock = {
"type": "file",
"url": block["file_uri"],
}
if mime_type := block.get("mime_type"):
uri_file_block["mime_type"] = mime_type
yield uri_file_block
elif block_type == "function_call" and "name" in block:
# Handle function calls
tool_call_block: types.ToolCall = {
"type": "tool_call",
"name": block["name"],
"args": block.get("args", {}),
"id": block.get("id", ""),
}
yield tool_call_block
elif block_type == "executable_code":
server_tool_call_input: types.ServerToolCall = {
"type": "server_tool_call",
"name": "code_interpreter",
"args": {
"code": block.get("executable_code", ""),
"language": block.get("language", "python"),
},
"id": block.get("id", ""),
}
yield server_tool_call_input
elif block_type == "code_execution_result":
outcome = block.get("outcome", 1)
status = "success" if outcome == 1 else "error"
server_tool_result_input: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": block.get("tool_call_id", ""),
"status": status, # type: ignore[typeddict-item]
"output": block.get("code_execution_result", ""),
}
if outcome is not None:
server_tool_result_input["extras"] = {"outcome": outcome}
yield server_tool_result_input
elif block.get("type") in types.KNOWN_BLOCK_TYPES:
# We see a standard block type, so we just cast it, even if
# we don't fully understand it. This may be dangerous, but
# it's better than losing information.
yield cast("types.ContentBlock", block)
else:
# We don't understand this block at all.
yield {"type": "non_standard", "value": block}
return list(_iter_blocks())
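# Hedged usage sketch, not part of the upstream module: a GenAI executable_code
# part becomes a standard "code_interpreter" server tool call. The code snippet
# is an illustrative placeholder.
def _demo_convert_genai_input() -> None:
    (converted,) = _convert_to_v1_from_genai_input(
        [{"type": "executable_code", "executable_code": "print(1)", "language": "python"}]  # type: ignore[list-item]
    )
    assert converted == {
        "type": "server_tool_call",
        "name": "code_interpreter",
        "args": {"code": "print(1)", "language": "python"},
        "id": "",
    }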
def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
"""Convert Google GenAI message content to v1 format.
Calling `.content_blocks` on an `AIMessage` where `response_metadata.model_provider`
is set to `'google_genai'` will invoke this function to parse the content into
standard content blocks for returning.
Args:
message: The `AIMessage` or `AIMessageChunk` to convert.
Returns:
List of standard content blocks derived from the message content.
"""
if isinstance(message.content, str):
# String content -> TextContentBlock (only add if non-empty in case of audio)
string_blocks: list[types.ContentBlock] = []
if message.content:
string_blocks.append({"type": "text", "text": message.content})
# Add any missing tool calls from message.tool_calls field
content_tool_call_ids = {
block.get("id")
for block in string_blocks
if isinstance(block, dict) and block.get("type") == "tool_call"
}
for tool_call in message.tool_calls:
id_ = tool_call.get("id")
if id_ and id_ not in content_tool_call_ids:
string_tool_call_block: types.ToolCall = {
"type": "tool_call",
"id": id_,
"name": tool_call["name"],
"args": tool_call["args"],
}
string_blocks.append(string_tool_call_block)
# Handle audio from additional_kwargs if present (for empty content cases)
audio_data = message.additional_kwargs.get("audio")
if audio_data and isinstance(audio_data, bytes):
audio_block: types.AudioContentBlock = {
"type": "audio",
"base64": _bytes_to_b64_str(audio_data),
"mime_type": "audio/wav", # Default to WAV for Google GenAI
}
string_blocks.append(audio_block)
grounding_metadata = message.response_metadata.get("grounding_metadata")
if grounding_metadata:
citations = translate_grounding_metadata_to_citations(grounding_metadata)
for block in string_blocks:
if block["type"] == "text" and citations:
# Add citations to the first text block only
block["annotations"] = cast("list[types.Annotation]", citations)
break
return string_blocks
if not isinstance(message.content, list):
# Unexpected content type, attempt to represent as text
return [{"type": "text", "text": str(message.content)}]
converted_blocks: list[types.ContentBlock] = []
for item in message.content:
if isinstance(item, str):
# Conversation history strings
# Citations are handled below after all blocks are converted
converted_blocks.append({"type": "text", "text": item}) # TextContentBlock
elif isinstance(item, dict):
item_type = item.get("type")
if item_type == "image_url":
# Convert image_url to standard image block (base64)
# (since the original implementation returned as url-base64 CC style)
image_url = item.get("image_url", {})
url = image_url.get("url", "")
if url:
# Extract base64 data
match = re.match(r"data:([^;]+);base64,(.+)", url)
if match:
# Data URI provided
mime_type, base64_data = match.groups()
converted_blocks.append(
{
"type": "image",
"base64": base64_data,
"mime_type": mime_type,
}
)
else:
# Assume it's raw base64 without data URI
try:
# Validate base64 and decode for MIME type detection
decoded_bytes = base64.b64decode(url, validate=True)
image_url_b64_block = {
"type": "image",
"base64": url,
}
if _HAS_FILETYPE:
# Guess MIME type based on file bytes
mime_type = None
kind = filetype.guess(decoded_bytes)
if kind:
mime_type = kind.mime
if mime_type:
image_url_b64_block["mime_type"] = mime_type
converted_blocks.append(
cast("types.ImageContentBlock", image_url_b64_block)
)
except Exception:
# Not valid base64, treat as non-standard
converted_blocks.append(
{
"type": "non_standard",
"value": item,
}
)
                else:
                    # No URL present; preserve as non-standard
                    # (this branch is likely unreachable per previous implementations)
                    converted_blocks.append({"type": "non_standard", "value": item})
elif item_type == "function_call":
# Handle Google GenAI function calls
function_call_block: types.ToolCall = {
"type": "tool_call",
"name": item.get("name", ""),
"args": item.get("args", {}),
"id": item.get("id", ""),
}
converted_blocks.append(function_call_block)
elif item_type == "file_data":
# Handle FileData URI-based content
file_block: types.FileContentBlock = {
"type": "file",
"url": item.get("file_uri", ""),
}
if mime_type := item.get("mime_type"):
file_block["mime_type"] = mime_type
converted_blocks.append(file_block)
elif item_type == "thinking":
                # Handle the 'thinking' type under which model thoughts are packaged
reasoning_block: types.ReasoningContentBlock = {
"type": "reasoning",
"reasoning": item.get("thinking", ""),
}
if signature := item.get("signature"):
reasoning_block["extras"] = {"signature": signature}
converted_blocks.append(reasoning_block)
elif item_type == "executable_code":
# Convert to standard server tool call block at the moment
server_tool_call_block: types.ServerToolCall = {
"type": "server_tool_call",
"name": "code_interpreter",
"args": {
"code": item.get("executable_code", ""),
"language": item.get("language", "python"), # Default to python
},
"id": item.get("id", ""),
}
converted_blocks.append(server_tool_call_block)
elif item_type == "code_execution_result":
# Map outcome to status: OUTCOME_OK (1) → success, else → error
outcome = item.get("outcome", 1)
status = "success" if outcome == 1 else "error"
server_tool_result_block: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": item.get("tool_call_id", ""),
"status": status, # type: ignore[typeddict-item]
"output": item.get("code_execution_result", ""),
}
server_tool_result_block["extras"] = {"block_type": item_type}
# Preserve original outcome in extras
if outcome is not None:
server_tool_result_block["extras"]["outcome"] = outcome
converted_blocks.append(server_tool_result_block)
elif item_type == "text":
converted_blocks.append(cast("types.TextContentBlock", item))
else:
# Unknown type, preserve as non-standard
converted_blocks.append({"type": "non_standard", "value": item})
else:
# Non-dict, non-string content
converted_blocks.append({"type": "non_standard", "value": item})
grounding_metadata = message.response_metadata.get("grounding_metadata")
if grounding_metadata:
citations = translate_grounding_metadata_to_citations(grounding_metadata)
for block in converted_blocks:
if block["type"] == "text" and citations:
# Add citations to text blocks (only the first text block)
block["annotations"] = cast("list[types.Annotation]", citations)
break
# Audio is stored on the message.additional_kwargs
audio_data = message.additional_kwargs.get("audio")
if audio_data and isinstance(audio_data, bytes):
audio_block_kwargs: types.AudioContentBlock = {
"type": "audio",
"base64": _bytes_to_b64_str(audio_data),
"mime_type": "audio/wav", # Default to WAV for Google GenAI
}
converted_blocks.append(audio_block_kwargs)
# Add any missing tool calls from message.tool_calls field
content_tool_call_ids = {
block.get("id")
for block in converted_blocks
if isinstance(block, dict) and block.get("type") == "tool_call"
}
for tool_call in message.tool_calls:
id_ = tool_call.get("id")
if id_ and id_ not in content_tool_call_ids:
missing_tool_call_block: types.ToolCall = {
"type": "tool_call",
"id": id_,
"name": tool_call["name"],
"args": tool_call["args"],
}
converted_blocks.append(missing_tool_call_block)
return converted_blocks
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with Google (GenAI) content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_genai(message)
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a chunk with Google (GenAI) content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_genai(message)
def _register_google_genai_translator() -> None:
"""Register the Google (GenAI) translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("google_genai", translate_content, translate_content_chunk)
_register_google_genai_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/google_genai.py",
"license": "MIT License",
"lines": 483,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/google_vertexai.py | """Derivations of standard content blocks from Google (VertexAI) content."""
from langchain_core.messages.block_translators.google_genai import (
translate_content,
translate_content_chunk,
)
def _register_google_vertexai_translator() -> None:
"""Register the Google (VertexAI) translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("google_vertexai", translate_content, translate_content_chunk)
_register_google_vertexai_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/google_vertexai.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/groq.py | """Derivations of standard content blocks from Groq content."""
import json
import re
from typing import Any
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs
def _populate_extras(
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
) -> types.ContentBlock:
"""Mutate a block, populating extras."""
if standard_block.get("type") == "non_standard":
return standard_block
for key, value in block.items():
if key not in known_fields:
if "extras" not in standard_block:
# Below type-ignores are because mypy thinks a non-standard block can
# get here, although we exclude them above.
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
standard_block["extras"][key] = value # type: ignore[typeddict-item]
return standard_block
def _parse_code_json(s: str) -> dict:
"""Extract Python code from Groq built-in tool content.
Extracts the value of the 'code' field from a string of the form:
{"code": some_arbitrary_text_with_unescaped_quotes}
    This is needed because Groq may not escape quotes in executed-tool
    arguments, e.g.:
```
'{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
```
""" # noqa: E501
m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
if not m:
msg = (
"Could not extract Python code from Groq tool arguments. "
"Expected a JSON object with a 'code' field."
)
raise ValueError(msg)
return {"code": m.group(1)}
def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
"""Convert groq message content to v1 format."""
content_blocks: list[types.ContentBlock] = []
if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
content_blocks.append(reasoning_block)
if executed_tools := message.additional_kwargs.get("executed_tools"):
for idx, executed_tool in enumerate(executed_tools):
args: dict[str, Any] | None = None
if arguments := executed_tool.get("arguments"):
try:
args = json.loads(arguments)
except json.JSONDecodeError:
if executed_tool.get("type") == "python":
try:
args = _parse_code_json(arguments)
except ValueError:
continue
elif (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
# GPT-OSS
args = {"code": arguments}
else:
continue
if isinstance(args, dict):
name = ""
if executed_tool.get("type") == "search":
name = "web_search"
elif executed_tool.get("type") == "python" or (
executed_tool.get("type") == "function"
and executed_tool.get("name") == "python"
):
name = "code_interpreter"
server_tool_call: types.ServerToolCall = {
"type": "server_tool_call",
"name": name,
"id": str(idx),
"args": args,
}
content_blocks.append(server_tool_call)
if tool_output := executed_tool.get("output"):
tool_result: types.ServerToolResult = {
"type": "server_tool_result",
"tool_call_id": str(idx),
"output": tool_output,
"status": "success",
}
known_fields = {"type", "arguments", "index", "output"}
_populate_extras(tool_result, executed_tool, known_fields)
content_blocks.append(tool_result)
if isinstance(message.content, str) and message.content:
content_blocks.append({"type": "text", "text": message.content})
content_blocks.extend(
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
for tool_call in message.tool_calls
)
return content_blocks
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with groq content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_groq(message)
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with groq content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
return _convert_to_v1_from_groq(message)
def _register_groq_translator() -> None:
"""Register the groq translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("groq", translate_content, translate_content_chunk)
_register_groq_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/groq.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/langchain_v0.py | """Derivations of standard content blocks from LangChain v0 multimodal content."""
from typing import Any, cast
from langchain_core.messages import content as types
def _convert_v0_multimodal_input_to_v1(
content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
"""Convert v0 multimodal blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any v0 format
blocks to v1 format.
If conversion fails, the block is left as a `'non_standard'` block.
Args:
content: List of content blocks to process.
Returns:
v1 content blocks.
"""
converted_blocks = []
unpacked_blocks: list[dict[str, Any]] = [
cast("dict[str, Any]", block)
if block.get("type") != "non_standard"
else block["value"] # type: ignore[typeddict-item] # this is only non-standard blocks
for block in content
]
for block in unpacked_blocks:
if block.get("type") in {"image", "audio", "file"} and "source_type" in block:
converted_block = _convert_legacy_v0_content_block_to_v1(block)
converted_blocks.append(cast("types.ContentBlock", converted_block))
elif block.get("type") in types.KNOWN_BLOCK_TYPES:
# Guard in case this function is used outside of the .content_blocks flow
converted_blocks.append(cast("types.ContentBlock", block))
else:
converted_blocks.append({"type": "non_standard", "value": block})
return converted_blocks
def _convert_legacy_v0_content_block_to_v1(
block: dict,
) -> types.ContentBlock | dict:
"""Convert a LangChain v0 content block to v1 format.
Preserves unknown keys as extras to avoid data loss.
Returns the original block unchanged if it's not in v0 format.
"""
def _extract_v0_extras(block_dict: dict, known_keys: set[str]) -> dict[str, Any]:
"""Extract unknown keys from v0 block to preserve as extras.
Args:
block_dict: The original v0 block dictionary.
known_keys: Set of keys known to be part of the v0 format for this block.
Returns:
A dictionary of extra keys not part of the known v0 format.
"""
return {k: v for k, v in block_dict.items() if k not in known_keys}
# Check if this is actually a v0 format block
block_type = block.get("type")
if block_type not in {"image", "audio", "file"} or "source_type" not in block:
# Not a v0 format block, return unchanged
return block
if block.get("type") == "image":
source_type = block.get("source_type")
if source_type == "url":
# image-url
known_keys = {"mime_type", "type", "source_type", "url"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_image_block(
url=block["url"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
# Don't construct with an ID if not present in original block
v1_image_url = types.ImageContentBlock(type="image", url=block["url"])
if block.get("mime_type"):
v1_image_url["mime_type"] = block["mime_type"]
v1_image_url["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_image_url["extras"][key] = value
if v1_image_url["extras"] == {}:
del v1_image_url["extras"]
return v1_image_url
if source_type == "base64":
# image-base64
known_keys = {"mime_type", "type", "source_type", "data"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_image_block(
base64=block["data"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
v1_image_base64 = types.ImageContentBlock(
type="image", base64=block["data"]
)
if block.get("mime_type"):
v1_image_base64["mime_type"] = block["mime_type"]
v1_image_base64["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_image_base64["extras"][key] = value
if v1_image_base64["extras"] == {}:
del v1_image_base64["extras"]
return v1_image_base64
if source_type == "id":
# image-id
known_keys = {"type", "source_type", "id"}
extras = _extract_v0_extras(block, known_keys)
# For id `source_type`, `id` is the file reference, not block ID
v1_image_id = types.ImageContentBlock(type="image", file_id=block["id"])
v1_image_id["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_image_id["extras"][key] = value
if v1_image_id["extras"] == {}:
del v1_image_id["extras"]
return v1_image_id
elif block.get("type") == "audio":
source_type = block.get("source_type")
if source_type == "url":
# audio-url
known_keys = {"mime_type", "type", "source_type", "url"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_audio_block(
url=block["url"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
# Don't construct with an ID if not present in original block
v1_audio_url: types.AudioContentBlock = types.AudioContentBlock(
type="audio", url=block["url"]
)
if block.get("mime_type"):
v1_audio_url["mime_type"] = block["mime_type"]
v1_audio_url["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_audio_url["extras"][key] = value
if v1_audio_url["extras"] == {}:
del v1_audio_url["extras"]
return v1_audio_url
if source_type == "base64":
# audio-base64
known_keys = {"mime_type", "type", "source_type", "data"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_audio_block(
base64=block["data"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
v1_audio_base64: types.AudioContentBlock = types.AudioContentBlock(
type="audio", base64=block["data"]
)
if block.get("mime_type"):
v1_audio_base64["mime_type"] = block["mime_type"]
v1_audio_base64["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_audio_base64["extras"][key] = value
if v1_audio_base64["extras"] == {}:
del v1_audio_base64["extras"]
return v1_audio_base64
if source_type == "id":
# audio-id
known_keys = {"type", "source_type", "id"}
extras = _extract_v0_extras(block, known_keys)
v1_audio_id: types.AudioContentBlock = types.AudioContentBlock(
type="audio", file_id=block["id"]
)
v1_audio_id["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_audio_id["extras"][key] = value
if v1_audio_id["extras"] == {}:
del v1_audio_id["extras"]
return v1_audio_id
elif block.get("type") == "file":
source_type = block.get("source_type")
if source_type == "url":
# file-url
known_keys = {"mime_type", "type", "source_type", "url"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_file_block(
url=block["url"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
v1_file_url: types.FileContentBlock = types.FileContentBlock(
type="file", url=block["url"]
)
if block.get("mime_type"):
v1_file_url["mime_type"] = block["mime_type"]
v1_file_url["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_file_url["extras"][key] = value
if v1_file_url["extras"] == {}:
del v1_file_url["extras"]
return v1_file_url
if source_type == "base64":
# file-base64
known_keys = {"mime_type", "type", "source_type", "data"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_file_block(
base64=block["data"],
mime_type=block.get("mime_type"),
id=block["id"],
**extras,
)
v1_file_base64: types.FileContentBlock = types.FileContentBlock(
type="file", base64=block["data"]
)
if block.get("mime_type"):
v1_file_base64["mime_type"] = block["mime_type"]
v1_file_base64["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_file_base64["extras"][key] = value
if v1_file_base64["extras"] == {}:
del v1_file_base64["extras"]
return v1_file_base64
if source_type == "id":
# file-id
known_keys = {"type", "source_type", "id"}
extras = _extract_v0_extras(block, known_keys)
return types.create_file_block(file_id=block["id"], **extras)
if source_type == "text":
# file-text
known_keys = {"mime_type", "type", "source_type", "url"}
extras = _extract_v0_extras(block, known_keys)
if "id" in block:
return types.create_plaintext_block(
# In v0, URL points to the text file content
# TODO: attribute this claim
text=block["url"],
id=block["id"],
**extras,
)
v1_file_text: types.PlainTextContentBlock = types.PlainTextContentBlock(
type="text-plain", text=block["url"], mime_type="text/plain"
)
if block.get("mime_type"):
v1_file_text["mime_type"] = block["mime_type"]
v1_file_text["extras"] = {}
for key, value in extras.items():
if value is not None:
v1_file_text["extras"][key] = value
if v1_file_text["extras"] == {}:
del v1_file_text["extras"]
return v1_file_text
# If we can't convert, return the block unchanged
return block
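# Hedged usage sketch, not part of the upstream module: a v0 base64 image block
# (identified by `source_type`) converts to a v1 image block. The base64
# payload is a placeholder.
def _demo_convert_legacy_v0_block() -> None:
    assert _convert_legacy_v0_content_block_to_v1(
        {
            "type": "image",
            "source_type": "base64",
            "data": "iVBORw0KGgo=",
            "mime_type": "image/png",
        }
    ) == {"type": "image", "base64": "iVBORw0KGgo=", "mime_type": "image/png"}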
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/langchain_v0.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/block_translators/openai.py | """Derivations of standard content blocks from OpenAI content."""
from __future__ import annotations
import json
import warnings
from typing import TYPE_CHECKING, Any, Literal, cast
from langchain_core.language_models._utils import (
_parse_data_uri,
is_openai_data_block,
)
from langchain_core.messages import AIMessageChunk
from langchain_core.messages import content as types
if TYPE_CHECKING:
from collections.abc import Iterator
from langchain_core.messages import AIMessage
def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
"""Convert `ImageContentBlock` to format expected by OpenAI Chat Completions.
Args:
block: The image content block to convert.
Raises:
ValueError: If required keys are missing.
ValueError: If source type is unsupported.
Returns:
The formatted image content block.
"""
if "url" in block:
return {
"type": "image_url",
"image_url": {
"url": block["url"],
},
}
if "base64" in block or block.get("source_type") == "base64":
if "mime_type" not in block:
error_message = "mime_type key is required for base64 data."
raise ValueError(error_message)
mime_type = block["mime_type"]
base64_data = block["data"] if "data" in block else block["base64"]
return {
"type": "image_url",
"image_url": {
"url": f"data:{mime_type};base64,{base64_data}",
},
}
error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
raise ValueError(error_message)
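# Illustrative usage (sketch, not part of the original module; the URL and
# base64 payload below are placeholders):
#
#     convert_to_openai_image_block({"type": "image", "url": "https://example.com/cat.png"})
#     # -> {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}
#
#     convert_to_openai_image_block({"type": "image", "base64": "aGk=", "mime_type": "image/png"})
#     # -> {"type": "image_url", "image_url": {"url": "data:image/png;base64,aGk="}}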
def convert_to_openai_data_block(
block: dict, api: Literal["chat/completions", "responses"] = "chat/completions"
) -> dict:
"""Format standard data content block to format expected by OpenAI.
"Standard data content block" can include old-style LangChain v0 blocks
(URLContentBlock, Base64ContentBlock, IDContentBlock) or new ones.
Args:
block: The content block to convert.
api: The OpenAI API being targeted. Either "chat/completions" or "responses".
Raises:
ValueError: If required keys are missing.
ValueError: If file URLs are used with Chat Completions API.
ValueError: If block type is unsupported.
Returns:
The formatted content block.
"""
if block["type"] == "image":
chat_completions_block = convert_to_openai_image_block(block)
if api == "responses":
formatted_block = {
"type": "input_image",
"image_url": chat_completions_block["image_url"]["url"],
}
if chat_completions_block["image_url"].get("detail"):
formatted_block["detail"] = chat_completions_block["image_url"][
"detail"
]
else:
formatted_block = chat_completions_block
elif block["type"] == "file":
if block.get("source_type") == "base64" or "base64" in block:
# Handle v0 format (Base64CB): {"source_type": "base64", "data": "...", ...}
# Handle v1 format (IDCB): {"base64": "...", ...}
base64_data = block["data"] if "source_type" in block else block["base64"]
file = {"file_data": f"data:{block['mime_type']};base64,{base64_data}"}
if filename := block.get("filename"):
file["filename"] = filename
elif (extras := block.get("extras")) and ("filename" in extras):
file["filename"] = extras["filename"]
elif (extras := block.get("metadata")) and ("filename" in extras):
# Backward compat
file["filename"] = extras["filename"]
else:
# Can't infer filename
warnings.warn(
"OpenAI may require a filename for file uploads. Specify a filename"
" in the content block, e.g.: {'type': 'file', 'mime_type': "
"'...', 'base64': '...', 'filename': 'my-file.pdf'}",
stacklevel=1,
)
formatted_block = {"type": "file", "file": file}
if api == "responses":
formatted_block = {"type": "input_file", **formatted_block["file"]}
elif block.get("source_type") == "id" or "file_id" in block:
# Handle v0 format (IDContentBlock): {"source_type": "id", "id": "...", ...}
# Handle v1 format (IDCB): {"file_id": "...", ...}
file_id = block["id"] if "source_type" in block else block["file_id"]
formatted_block = {"type": "file", "file": {"file_id": file_id}}
if api == "responses":
formatted_block = {"type": "input_file", **formatted_block["file"]}
elif "url" in block: # Intentionally do not check for source_type="url"
if api == "chat/completions":
error_msg = "OpenAI Chat Completions does not support file URLs."
raise ValueError(error_msg)
# Only supported by Responses API; return in that format
formatted_block = {"type": "input_file", "file_url": block["url"]}
else:
error_msg = "Keys base64, url, or file_id required for file blocks."
raise ValueError(error_msg)
elif block["type"] == "audio":
if "base64" in block or block.get("source_type") == "base64":
# Handle v0 format: {"source_type": "base64", "data": "...", ...}
# Handle v1 format: {"base64": "...", ...}
base64_data = block["data"] if "source_type" in block else block["base64"]
audio_format = block["mime_type"].split("/")[-1]
formatted_block = {
"type": "input_audio",
"input_audio": {"data": base64_data, "format": audio_format},
}
else:
error_msg = "Key base64 is required for audio blocks."
raise ValueError(error_msg)
else:
error_msg = f"Block of type {block['type']} is not supported."
raise ValueError(error_msg)
return formatted_block
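# Illustrative usage (sketch, not part of the original module; "file-abc123"
# is a placeholder ID): the same v1 file block is shaped differently per API.
#
#     convert_to_openai_data_block({"type": "file", "file_id": "file-abc123"})
#     # -> {"type": "file", "file": {"file_id": "file-abc123"}}
#
#     convert_to_openai_data_block({"type": "file", "file_id": "file-abc123"}, api="responses")
#     # -> {"type": "input_file", "file_id": "file-abc123"}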
# v1 / Chat Completions
def _convert_to_v1_from_chat_completions(
message: AIMessage,
) -> list[types.ContentBlock]:
"""Mutate a Chat Completions message to v1 format."""
content_blocks: list[types.ContentBlock] = []
if isinstance(message.content, str):
if message.content:
content_blocks = [{"type": "text", "text": message.content}]
else:
content_blocks = []
for tool_call in message.tool_calls:
content_blocks.append(
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
return content_blocks
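# Illustrative sketch (not part of the original module; the tool name and
# call ID are placeholders):
#
#     msg = AIMessage(
#         content="Hello!",
#         tool_calls=[{"type": "tool_call", "name": "add", "args": {"a": 1}, "id": "call_1"}],
#     )
#     _convert_to_v1_from_chat_completions(msg)
#     # -> [
#     #     {"type": "text", "text": "Hello!"},
#     #     {"type": "tool_call", "name": "add", "args": {"a": 1}, "id": "call_1"},
#     # ]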
def _convert_to_v1_from_chat_completions_input(
content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
"""Convert OpenAI Chat Completions format blocks to v1 format.
During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
block as a `'non_standard'` block with the original block stored in the `value`
field. This function attempts to unpack those blocks and convert any blocks that
might be OpenAI format to v1 ContentBlocks.
If conversion fails, the block is left as a `'non_standard'` block.
Args:
content: List of content blocks to process.
Returns:
Updated list with OpenAI blocks converted to v1 format.
"""
converted_blocks = []
unpacked_blocks: list[dict[str, Any]] = [
cast("dict[str, Any]", block)
if block.get("type") != "non_standard"
else block["value"] # type: ignore[typeddict-item] # this is only non-standard blocks
for block in content
]
for block in unpacked_blocks:
if block.get("type") in {
"image_url",
"input_audio",
"file",
} and is_openai_data_block(block):
converted_block = _convert_openai_format_to_data_block(block)
# If conversion succeeded, use it; otherwise keep as non_standard
if (
isinstance(converted_block, dict)
and converted_block.get("type") in types.KNOWN_BLOCK_TYPES
):
converted_blocks.append(cast("types.ContentBlock", converted_block))
else:
converted_blocks.append({"type": "non_standard", "value": block})
elif block.get("type") in types.KNOWN_BLOCK_TYPES:
converted_blocks.append(cast("types.ContentBlock", block))
else:
converted_blocks.append({"type": "non_standard", "value": block})
return converted_blocks
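# Illustrative sketch (not part of the original module; assumes the wrapped
# block passes `is_openai_data_block`): a 'non_standard' wrapper around an
# OpenAI-format image block is unpacked and converted, with an auto-generated
# "lc_..." block ID.
#
#     _convert_to_v1_from_chat_completions_input(
#         [
#             {
#                 "type": "non_standard",
#                 "value": {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
#             }
#         ]
#     )
#     # -> [{"type": "image", "id": "lc_...", "url": "https://example.com/cat.png"}]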
def _convert_to_v1_from_chat_completions_chunk(
chunk: AIMessageChunk,
) -> list[types.ContentBlock]:
"""Mutate a Chat Completions chunk to v1 format."""
content_blocks: list[types.ContentBlock] = []
if isinstance(chunk.content, str):
if chunk.content:
content_blocks = [{"type": "text", "text": chunk.content}]
else:
content_blocks = []
if chunk.chunk_position == "last":
for tool_call in chunk.tool_calls:
content_blocks.append(
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
else:
for tool_call_chunk in chunk.tool_call_chunks:
tc: types.ToolCallChunk = {
"type": "tool_call_chunk",
"id": tool_call_chunk.get("id"),
"name": tool_call_chunk.get("name"),
"args": tool_call_chunk.get("args"),
}
if (idx := tool_call_chunk.get("index")) is not None:
tc["index"] = idx
content_blocks.append(tc)
return content_blocks
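# Illustrative sketch (not part of the original module; IDs and names are
# placeholders): a mid-stream chunk carrying a partial tool call surfaces it
# as a 'tool_call_chunk' block.
#
#     chunk = AIMessageChunk(
#         content="",
#         tool_call_chunks=[
#             {"type": "tool_call_chunk", "id": "call_1", "name": "add", "args": '{"a":', "index": 0}
#         ],
#     )
#     _convert_to_v1_from_chat_completions_chunk(chunk)
#     # -> [{"type": "tool_call_chunk", "id": "call_1", "name": "add", "args": '{"a":', "index": 0}]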
def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
"""Convert a v1 message to the Chat Completions format."""
if isinstance(message.content, list):
new_content: list = []
for block in message.content:
if isinstance(block, dict):
block_type = block.get("type")
if block_type == "text":
# Strip annotations
new_content.append({"type": "text", "text": block["text"]})
elif block_type in {"reasoning", "tool_call"}:
pass
else:
new_content.append(block)
else:
new_content.append(block)
return message.model_copy(update={"content": new_content})
return message
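# Illustrative sketch (not part of the original module): reasoning and
# tool-call blocks are dropped (the latter travel via the message's
# `tool_calls` attribute), and text annotations are stripped.
#
#     msg = AIMessage(
#         content=[
#             {"type": "reasoning", "reasoning": "thinking..."},
#             {"type": "text", "text": "Hi", "annotations": [{"type": "citation", "url": "https://example.com"}]},
#         ]
#     )
#     _convert_from_v1_to_chat_completions(msg).content
#     # -> [{"type": "text", "text": "Hi"}]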
# Responses
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
"""Convert v0 AIMessage into `output_version="responses/v1"` format."""
# Only update ChatOpenAI v0.3 AIMessages
is_chatopenai_v03 = (
isinstance(message.content, list)
and all(isinstance(b, dict) for b in message.content)
) and (
any(
item in message.additional_kwargs
for item in [
"reasoning",
"tool_outputs",
"refusal",
_FUNCTION_CALL_IDS_MAP_KEY,
]
)
or (
isinstance(message.id, str)
and message.id.startswith("msg_")
and (response_id := message.response_metadata.get("id"))
and isinstance(response_id, str)
and response_id.startswith("resp_")
)
)
if not is_chatopenai_v03:
return message
content_order = [
"reasoning",
"code_interpreter_call",
"mcp_call",
"image_generation_call",
"text",
"refusal",
"function_call",
"computer_call",
"mcp_list_tools",
"mcp_approval_request",
# N. B. "web_search_call" and "file_search_call" were not passed back in
# in v0.3
]
# Build a bucket for every known block type
buckets: dict[str, list] = {key: [] for key in content_order}
unknown_blocks = []
# Reasoning
if reasoning := message.additional_kwargs.get("reasoning"):
if isinstance(message, AIMessageChunk) and message.chunk_position != "last":
buckets["reasoning"].append({**reasoning, "type": "reasoning"})
else:
buckets["reasoning"].append(reasoning)
# Refusal
if refusal := message.additional_kwargs.get("refusal"):
buckets["refusal"].append({"type": "refusal", "refusal": refusal})
# Text
for block in message.content:
if isinstance(block, dict) and block.get("type") == "text":
block_copy = block.copy()
if isinstance(message.id, str) and message.id.startswith("msg_"):
block_copy["id"] = message.id
buckets["text"].append(block_copy)
else:
unknown_blocks.append(block)
# Function calls
function_call_ids = message.additional_kwargs.get(_FUNCTION_CALL_IDS_MAP_KEY)
if (
isinstance(message, AIMessageChunk)
and len(message.tool_call_chunks) == 1
and message.chunk_position != "last"
):
# Isolated chunk
tool_call_chunk = message.tool_call_chunks[0]
function_call = {
"type": "function_call",
"name": tool_call_chunk.get("name"),
"arguments": tool_call_chunk.get("args"),
"call_id": tool_call_chunk.get("id"),
}
if function_call_ids is not None and (
id_ := function_call_ids.get(tool_call_chunk.get("id"))
):
function_call["id"] = id_
buckets["function_call"].append(function_call)
else:
for tool_call in message.tool_calls:
function_call = {
"type": "function_call",
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
"call_id": tool_call["id"],
}
if function_call_ids is not None and (
id_ := function_call_ids.get(tool_call["id"])
):
function_call["id"] = id_
buckets["function_call"].append(function_call)
# Tool outputs
tool_outputs = message.additional_kwargs.get("tool_outputs", [])
for block in tool_outputs:
if isinstance(block, dict) and (key := block.get("type")) and key in buckets:
buckets[key].append(block)
else:
unknown_blocks.append(block)
# Re-assemble the content list in the canonical order
new_content = []
for key in content_order:
new_content.extend(buckets[key])
new_content.extend(unknown_blocks)
new_additional_kwargs = dict(message.additional_kwargs)
new_additional_kwargs.pop("reasoning", None)
new_additional_kwargs.pop("refusal", None)
new_additional_kwargs.pop("tool_outputs", None)
if "id" in message.response_metadata:
new_id = message.response_metadata["id"]
else:
new_id = message.id
return message.model_copy(
update={
"content": new_content,
"additional_kwargs": new_additional_kwargs,
"id": new_id,
},
deep=False,
)
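# Illustrative sketch (not part of the original module; IDs are placeholders):
# given a ChatOpenAI v0.3 message such as
#
#     AIMessage(
#         content=[{"type": "text", "text": "Hi"}],
#         additional_kwargs={"reasoning": {"type": "reasoning", "summary": []}},
#         response_metadata={"id": "resp_123"},
#         id="msg_123",
#     )
#
# the reasoning payload is folded back into `content` in the canonical order
# above, "reasoning"/"refusal"/"tool_outputs" are removed from
# `additional_kwargs`, and the message ID becomes the response ID "resp_123".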
def _convert_openai_format_to_data_block(
block: dict,
) -> types.ContentBlock | dict[Any, Any]:
"""Convert OpenAI image/audio/file content block to respective v1 multimodal block.
We expect that the incoming block is verified to be in OpenAI Chat Completions
format.
If parsing fails, passes block through unchanged.
Mappings (Chat Completions to LangChain v1):
- Image -> `ImageContentBlock`
- Audio -> `AudioContentBlock`
- File -> `FileContentBlock`
"""
# Extract extra keys to put them in `extras`
def _extract_extras(block_dict: dict, known_keys: set[str]) -> dict[str, Any]:
"""Extract unknown keys from block to preserve as extras."""
return {k: v for k, v in block_dict.items() if k not in known_keys}
# base64-style image block
if (block["type"] == "image_url") and (
parsed := _parse_data_uri(block["image_url"]["url"])
):
known_keys = {"type", "image_url"}
extras = _extract_extras(block, known_keys)
# Also extract extras from nested image_url dict
image_url_known_keys = {"url"}
image_url_extras = _extract_extras(block["image_url"], image_url_known_keys)
# Merge extras
all_extras = {**extras}
for key, value in image_url_extras.items():
if key == "detail": # Don't rename
all_extras["detail"] = value
else:
all_extras[f"image_url_{key}"] = value
return types.create_image_block(
# Even though this is labeled as `url`, it can be base64-encoded
base64=parsed["data"],
mime_type=parsed["mime_type"],
**all_extras,
)
# url-style image block
if (block["type"] == "image_url") and isinstance(
block["image_url"].get("url"), str
):
known_keys = {"type", "image_url"}
extras = _extract_extras(block, known_keys)
image_url_known_keys = {"url"}
image_url_extras = _extract_extras(block["image_url"], image_url_known_keys)
all_extras = {**extras}
for key, value in image_url_extras.items():
if key == "detail": # Don't rename
all_extras["detail"] = value
else:
all_extras[f"image_url_{key}"] = value
return types.create_image_block(
url=block["image_url"]["url"],
**all_extras,
)
# base64-style audio block
# audio is only represented via raw data, no url or ID option
if block["type"] == "input_audio":
known_keys = {"type", "input_audio"}
extras = _extract_extras(block, known_keys)
# Also extract extras from nested audio dict
audio_known_keys = {"data", "format"}
audio_extras = _extract_extras(block["input_audio"], audio_known_keys)
all_extras = {**extras}
for key, value in audio_extras.items():
all_extras[f"audio_{key}"] = value
return types.create_audio_block(
base64=block["input_audio"]["data"],
mime_type=f"audio/{block['input_audio']['format']}",
**all_extras,
)
# id-style file block
if block.get("type") == "file" and "file_id" in block.get("file", {}):
known_keys = {"type", "file"}
extras = _extract_extras(block, known_keys)
file_known_keys = {"file_id"}
file_extras = _extract_extras(block["file"], file_known_keys)
all_extras = {**extras}
for key, value in file_extras.items():
all_extras[f"file_{key}"] = value
return types.create_file_block(
file_id=block["file"]["file_id"],
**all_extras,
)
# base64-style file block
if (block["type"] == "file") and (
parsed := _parse_data_uri(block["file"]["file_data"])
):
known_keys = {"type", "file"}
extras = _extract_extras(block, known_keys)
file_known_keys = {"file_data", "filename"}
file_extras = _extract_extras(block["file"], file_known_keys)
all_extras = {**extras}
for key, value in file_extras.items():
all_extras[f"file_{key}"] = value
filename = block["file"].get("filename")
return types.create_file_block(
base64=parsed["data"],
mime_type="application/pdf",
filename=filename,
**all_extras,
)
# Escape hatch
return block
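# Illustrative usage (sketch, not part of the original module; the base64
# payload is a placeholder and the "lc_..." ID is auto-generated):
#
#     _convert_openai_format_to_data_block(
#         {"type": "input_audio", "input_audio": {"data": "aGk=", "format": "wav"}}
#     )
#     # -> {"type": "audio", "id": "lc_...", "base64": "aGk=", "mime_type": "audio/wav"}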
# v1 / Responses
def _convert_annotation_to_v1(annotation: dict[str, Any]) -> types.Annotation:
annotation_type = annotation.get("type")
if annotation_type == "url_citation":
known_fields = {
"type",
"url",
"title",
"cited_text",
"start_index",
"end_index",
}
url_citation = cast("types.Citation", {})
for field in ("end_index", "start_index", "title"):
if field in annotation:
url_citation[field] = annotation[field]
url_citation["type"] = "citation"
url_citation["url"] = annotation["url"]
for field, value in annotation.items():
if field not in known_fields:
if "extras" not in url_citation:
url_citation["extras"] = {}
url_citation["extras"][field] = value
return url_citation
if annotation_type == "file_citation":
known_fields = {
"type",
"title",
"cited_text",
"start_index",
"end_index",
"filename",
}
document_citation: types.Citation = {"type": "citation"}
if "filename" in annotation:
document_citation["title"] = annotation["filename"]
for field, value in annotation.items():
if field not in known_fields:
if "extras" not in document_citation:
document_citation["extras"] = {}
document_citation["extras"][field] = value
return document_citation
# TODO: standardise container_file_citation?
non_standard_annotation: types.NonStandardAnnotation = {
"type": "non_standard_annotation",
"value": annotation,
}
return non_standard_annotation
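# Illustrative usage (sketch, not part of the original module):
#
#     _convert_annotation_to_v1(
#         {"type": "url_citation", "url": "https://example.com", "title": "Example", "start_index": 0, "end_index": 5}
#     )
#     # -> {"end_index": 5, "start_index": 0, "title": "Example", "type": "citation", "url": "https://example.com"}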
def _explode_reasoning(block: dict[str, Any]) -> Iterator[types.ReasoningContentBlock]:
if "summary" not in block:
yield cast("types.ReasoningContentBlock", block)
return
known_fields = {"type", "reasoning", "id", "index"}
unknown_fields = [
field for field in block if field != "summary" and field not in known_fields
]
if unknown_fields:
block["extras"] = {}
for field in unknown_fields:
block["extras"][field] = block.pop(field)
if not block["summary"]:
# [{'id': 'rs_...', 'summary': [], 'type': 'reasoning', 'index': 0}]
block = {k: v for k, v in block.items() if k != "summary"}
if "index" in block:
meaningful_idx = f"{block['index']}_0"
block["index"] = f"lc_rs_{meaningful_idx.encode().hex()}"
yield cast("types.ReasoningContentBlock", block)
return
# Common part for every exploded line, except 'summary'
common = {k: v for k, v in block.items() if k in known_fields}
# Optional keys that must appear only in the first exploded item
first_only = block.pop("extras", None)
for idx, part in enumerate(block["summary"]):
new_block = dict(common)
new_block["reasoning"] = part.get("text", "")
if idx == 0 and first_only:
new_block.update(first_only)
if "index" in new_block:
summary_index = part.get("index", 0)
meaningful_idx = f"{new_block['index']}_{summary_index}"
new_block["index"] = f"lc_rs_{meaningful_idx.encode().hex()}"
yield cast("types.ReasoningContentBlock", new_block)
def _convert_to_v1_from_responses(message: AIMessage) -> list[types.ContentBlock]:
"""Convert a Responses message to v1 format."""
def _iter_blocks() -> Iterator[types.ContentBlock]:
for raw_block in message.content:
if not isinstance(raw_block, dict):
continue
block = raw_block.copy()
block_type = block.get("type")
if block_type == "text":
if "text" not in block:
block["text"] = ""
if "annotations" in block:
block["annotations"] = [
_convert_annotation_to_v1(a) for a in block["annotations"]
]
if "index" in block:
block["index"] = f"lc_txt_{block['index']}"
yield cast("types.TextContentBlock", block)
elif block_type == "reasoning":
yield from _explode_reasoning(block)
elif block_type == "image_generation_call" and (
result := block.get("result")
):
new_block = {"type": "image", "base64": result}
if output_format := block.get("output_format"):
new_block["mime_type"] = f"image/{output_format}"
if "id" in block:
new_block["id"] = block["id"]
if "index" in block:
new_block["index"] = f"lc_img_{block['index']}"
for extra_key in (
"status",
"background",
"output_format",
"quality",
"revised_prompt",
"size",
):
if extra_key in block:
if "extras" not in new_block:
new_block["extras"] = {}
new_block["extras"][extra_key] = block[extra_key]
yield cast("types.ImageContentBlock", new_block)
elif block_type == "function_call":
tool_call_block: (
types.ToolCall | types.InvalidToolCall | types.ToolCallChunk | None
) = None
call_id = block.get("call_id", "")
if (
isinstance(message, AIMessageChunk)
and len(message.tool_call_chunks) == 1
and message.chunk_position != "last"
):
tool_call_block = message.tool_call_chunks[0].copy() # type: ignore[assignment]
elif call_id:
for tool_call in message.tool_calls or []:
if tool_call.get("id") == call_id:
tool_call_block = {
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
break
else:
for invalid_tool_call in message.invalid_tool_calls or []:
if invalid_tool_call.get("id") == call_id:
tool_call_block = invalid_tool_call.copy()
break
if tool_call_block:
if "id" in block:
if "extras" not in tool_call_block:
tool_call_block["extras"] = {}
tool_call_block["extras"]["item_id"] = block["id"]
if "index" in block:
tool_call_block["index"] = f"lc_tc_{block['index']}"
yield tool_call_block
elif block_type == "web_search_call":
web_search_call = {
"type": "server_tool_call",
"name": "web_search",
"args": {},
"id": block["id"],
}
if "index" in block:
web_search_call["index"] = f"lc_wsc_{block['index']}"
sources: dict[str, Any] | None = None
if "action" in block and isinstance(block["action"], dict):
if "sources" in block["action"]:
sources = block["action"]["sources"]
web_search_call["args"] = {
k: v for k, v in block["action"].items() if k != "sources"
}
for key in block:
if key not in {"type", "id", "action", "status", "index"}:
web_search_call[key] = block[key]
yield cast("types.ServerToolCall", web_search_call)
# If .content already has web_search_result, don't add
if not any(
isinstance(other_block, dict)
and other_block.get("type") == "web_search_result"
and other_block.get("id") == block["id"]
for other_block in message.content
):
web_search_result = {
"type": "server_tool_result",
"tool_call_id": block["id"],
}
if sources:
web_search_result["output"] = {"sources": sources}
status = block.get("status")
if status == "failed":
web_search_result["status"] = "error"
elif status == "completed":
web_search_result["status"] = "success"
elif status:
web_search_result["extras"] = {"status": status}
if "index" in block and isinstance(block["index"], int):
web_search_result["index"] = f"lc_wsr_{block['index'] + 1}"
yield cast("types.ServerToolResult", web_search_result)
elif block_type == "file_search_call":
file_search_call = {
"type": "server_tool_call",
"name": "file_search",
"id": block["id"],
"args": {"queries": block.get("queries", [])},
}
if "index" in block:
file_search_call["index"] = f"lc_fsc_{block['index']}"
for key in block:
if key not in {
"type",
"id",
"queries",
"results",
"status",
"index",
}:
file_search_call[key] = block[key]
yield cast("types.ServerToolCall", file_search_call)
file_search_result = {
"type": "server_tool_result",
"tool_call_id": block["id"],
}
if file_search_output := block.get("results"):
file_search_result["output"] = file_search_output
status = block.get("status")
if status == "failed":
file_search_result["status"] = "error"
elif status == "completed":
file_search_result["status"] = "success"
elif status:
file_search_result["extras"] = {"status": status}
if "index" in block and isinstance(block["index"], int):
file_search_result["index"] = f"lc_fsr_{block['index'] + 1}"
yield cast("types.ServerToolResult", file_search_result)
elif block_type == "code_interpreter_call":
code_interpreter_call = {
"type": "server_tool_call",
"name": "code_interpreter",
"id": block["id"],
}
if "code" in block:
code_interpreter_call["args"] = {"code": block["code"]}
if "index" in block:
code_interpreter_call["index"] = f"lc_cic_{block['index']}"
known_fields = {
"type",
"id",
"outputs",
"status",
"code",
"extras",
"index",
}
for key in block:
if key not in known_fields:
if "extras" not in code_interpreter_call:
code_interpreter_call["extras"] = {}
code_interpreter_call["extras"][key] = block[key]
code_interpreter_result = {
"type": "server_tool_result",
"tool_call_id": block["id"],
}
if "outputs" in block:
code_interpreter_result["output"] = block["outputs"]
status = block.get("status")
if status == "failed":
code_interpreter_result["status"] = "error"
elif status == "completed":
code_interpreter_result["status"] = "success"
elif status:
code_interpreter_result["extras"] = {"status": status}
if "index" in block and isinstance(block["index"], int):
code_interpreter_result["index"] = f"lc_cir_{block['index'] + 1}"
yield cast("types.ServerToolCall", code_interpreter_call)
yield cast("types.ServerToolResult", code_interpreter_result)
elif block_type == "mcp_call":
mcp_call = {
"type": "server_tool_call",
"name": "remote_mcp",
"id": block["id"],
}
if (arguments := block.get("arguments")) and isinstance(arguments, str):
try:
mcp_call["args"] = json.loads(block["arguments"])
except json.JSONDecodeError:
mcp_call["extras"] = {"arguments": arguments}
if "name" in block:
if "extras" not in mcp_call:
mcp_call["extras"] = {}
mcp_call["extras"]["tool_name"] = block["name"]
if "server_label" in block:
if "extras" not in mcp_call:
mcp_call["extras"] = {}
mcp_call["extras"]["server_label"] = block["server_label"]
if "index" in block:
mcp_call["index"] = f"lc_mcp_{block['index']}"
known_fields = {
"type",
"id",
"arguments",
"name",
"server_label",
"output",
"error",
"extras",
"index",
}
for key in block:
if key not in known_fields:
if "extras" not in mcp_call:
mcp_call["extras"] = {}
mcp_call["extras"][key] = block[key]
yield cast("types.ServerToolCall", mcp_call)
mcp_result = {
"type": "server_tool_result",
"tool_call_id": block["id"],
}
if mcp_output := block.get("output"):
mcp_result["output"] = mcp_output
error = block.get("error")
if error:
if "extras" not in mcp_result:
mcp_result["extras"] = {}
mcp_result["extras"]["error"] = error
mcp_result["status"] = "error"
else:
mcp_result["status"] = "success"
if "index" in block and isinstance(block["index"], int):
mcp_result["index"] = f"lc_mcpr_{block['index'] + 1}"
yield cast("types.ServerToolResult", mcp_result)
elif block_type == "mcp_list_tools":
mcp_list_tools_call = {
"type": "server_tool_call",
"name": "mcp_list_tools",
"args": {},
"id": block["id"],
}
if "server_label" in block:
mcp_list_tools_call["extras"] = {}
mcp_list_tools_call["extras"]["server_label"] = block[
"server_label"
]
if "index" in block:
mcp_list_tools_call["index"] = f"lc_mlt_{block['index']}"
known_fields = {
"type",
"id",
"name",
"server_label",
"tools",
"error",
"extras",
"index",
}
for key in block:
if key not in known_fields:
if "extras" not in mcp_list_tools_call:
mcp_list_tools_call["extras"] = {}
mcp_list_tools_call["extras"][key] = block[key]
yield cast("types.ServerToolCall", mcp_list_tools_call)
mcp_list_tools_result = {
"type": "server_tool_result",
"tool_call_id": block["id"],
}
if mcp_output := block.get("tools"):
mcp_list_tools_result["output"] = mcp_output
error = block.get("error")
if error:
if "extras" not in mcp_list_tools_result:
mcp_list_tools_result["extras"] = {}
mcp_list_tools_result["extras"]["error"] = error
mcp_list_tools_result["status"] = "error"
else:
mcp_list_tools_result["status"] = "success"
if "index" in block and isinstance(block["index"], int):
mcp_list_tools_result["index"] = f"lc_mltr_{block['index'] + 1}"
yield cast("types.ServerToolResult", mcp_list_tools_result)
elif block_type in types.KNOWN_BLOCK_TYPES:
yield cast("types.ContentBlock", block)
else:
new_block = {"type": "non_standard", "value": block}
if "index" in new_block["value"]:
new_block["index"] = f"lc_ns_{new_block['value'].pop('index')}"
yield cast("types.NonStandardContentBlock", new_block)
return list(_iter_blocks())
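# Illustrative sketch (not part of the original module; "ws_1" is a
# placeholder ID): a Responses "web_search_call" item yields both a server
# tool call and a synthesized result block.
#
#     msg = AIMessage(content=[{"type": "web_search_call", "id": "ws_1", "status": "completed"}])
#     _convert_to_v1_from_responses(msg)
#     # -> [
#     #     {"type": "server_tool_call", "name": "web_search", "args": {}, "id": "ws_1"},
#     #     {"type": "server_tool_result", "tool_call_id": "ws_1", "status": "success"},
#     # ]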
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message with OpenAI content.
Args:
message: The message to translate.
Returns:
The derived content blocks.
"""
if isinstance(message.content, str):
return _convert_to_v1_from_chat_completions(message)
message = _convert_from_v03_ai_message(message)
return _convert_to_v1_from_responses(message)
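# Illustrative usage (sketch, not part of the original module):
#
#     translate_content(AIMessage(content="Hello!"))
#     # -> [{"type": "text", "text": "Hello!"}]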
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
"""Derive standard content blocks from a message chunk with OpenAI content.
Args:
message: The message chunk to translate.
Returns:
The derived content blocks.
"""
if isinstance(message.content, str):
return _convert_to_v1_from_chat_completions_chunk(message)
message = _convert_from_v03_ai_message(message) # type: ignore[assignment]
return _convert_to_v1_from_responses(message)
def _register_openai_translator() -> None:
"""Register the OpenAI translator with the central registry.
Run automatically when the module is imported.
"""
from langchain_core.messages.block_translators import ( # noqa: PLC0415
register_translator,
)
register_translator("openai", translate_content, translate_content_chunk)
_register_openai_translator()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/block_translators/openai.py",
"license": "MIT License",
"lines": 892,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/langchain_core/messages/content.py | """Standard, multimodal content blocks for Large Language Model I/O.
This module provides standardized data structures for representing inputs to and outputs
from LLMs. The core abstraction is the **Content Block**, a `TypedDict`.
**Rationale**
Different LLM providers use distinct and incompatible API schemas. This module provides
a unified, provider-agnostic format to facilitate these interactions. A message to or
from a model is simply a list of content blocks, allowing for the natural interleaving
of text, images, and other content in a single ordered sequence.
An adapter for a specific provider is responsible for translating this standard list of
blocks into the format required by its API.
**Extensibility**
Data **not yet mapped** to a standard block may be represented using the
`NonStandardContentBlock`, which allows for provider-specific data to be included
without losing the benefits of type checking and validation.
Furthermore, provider-specific fields **within** a standard block are fully supported
by default in the `extras` field of each block. This allows for additional metadata
to be included without breaking the standard structure. For example, Google's thought
signature:
```python
AIMessage(
content=[
{
"type": "text",
"text": "J'adore la programmation.",
"extras": {"signature": "EpoWCpc..."}, # Thought signature
}
], ...
)
```
!!! note
Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we
intend to add `extra_items=Any` as a param to Content Blocks. This will signify to
type checkers that additional provider-specific fields are allowed outside of the
`extras` field, and that will become the new standard approach to adding
provider-specific metadata.
??? note
**Example with PEP 728 provider-specific fields:**
```python
# Content block definition
# NOTE: `extra_items=Any`
class TextContentBlock(TypedDict, extra_items=Any):
type: Literal["text"]
id: NotRequired[str]
text: str
annotations: NotRequired[list[Annotation]]
    index: NotRequired[int | str]
```
```python
from langchain_core.messages.content import TextContentBlock
# Create a text content block with provider-specific fields
my_block: TextContentBlock = {
# Add required fields
"type": "text",
"text": "Hello, world!",
# Additional fields not specified in the TypedDict
# These are valid with PEP 728 and are typed as Any
"openai_metadata": {"model": "gpt-4", "temperature": 0.7},
"anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
"custom_field": "any value",
}
# Mutating an existing block to add provider-specific fields
openai_data = my_block["openai_metadata"] # Type: Any
```
**Example Usage**
```python
# Direct construction
from langchain_core.messages import AIMessage
from langchain_core.messages.content import TextContentBlock, ImageContentBlock

multimodal_message = AIMessage(
content_blocks=[
TextContentBlock(type="text", text="What is shown in this image?"),
ImageContentBlock(
type="image",
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
# Using factories
from langchain_core.messages import AIMessage
from langchain_core.messages.content import create_text_block, create_image_block

multimodal_message = AIMessage(
content=[
create_text_block("What is shown in this image?"),
create_image_block(
url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
mime_type="image/png",
),
]
)
```
Factory functions offer benefits such as:
- Automatic ID generation (when not provided)
- No need to manually specify the `type` field
"""
from typing import Any, Literal, get_args, get_type_hints
from typing_extensions import NotRequired, TypedDict
from langchain_core.utils.utils import ensure_id
class Citation(TypedDict):
"""Annotation for citing data from a document.
!!! note
`start`/`end` indices refer to the **response text**,
not the source text. This means that the indices are relative to the model's
response, not the original document (as specified in the `url`).
!!! note "Factory function"
`create_citation` may also be used as a factory to create a `Citation`.
Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["citation"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
url: NotRequired[str]
"""URL of the document source."""
title: NotRequired[str]
"""Source document title.
For example, the page title for a web page or the title of a paper.
"""
start_index: NotRequired[int]
"""Start index of the **response text** (`TextContentBlock.text`)."""
end_index: NotRequired[int]
"""End index of the **response text** (`TextContentBlock.text`)"""
cited_text: NotRequired[str]
"""Excerpt of source text being cited."""
# NOTE: not including spans for the raw document text (such as `text_start_index`
# and `text_end_index`) as this is not currently supported by any provider. The
# thinking is that the `cited_text` should be sufficient for most use cases, and it
# is difficult to reliably extract spans from the raw document text across file
# formats or encoding schemes.
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class NonStandardAnnotation(TypedDict):
"""Provider-specific annotation format."""
type: Literal["non_standard_annotation"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
value: dict[str, Any]
"""Provider-specific annotation data."""
Annotation = Citation | NonStandardAnnotation
"""A union of all defined `Annotation` types."""
class TextContentBlock(TypedDict):
"""Text output from a LLM.
This typically represents the main text content of a message, such as the response
from a language model or the text of a user message.
!!! note "Factory function"
`create_text_block` may also be used as a factory to create a
`TextContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["text"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
text: str
"""Block text."""
annotations: NotRequired[list[Annotation]]
"""`Citation`s and other annotations."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ToolCall(TypedDict):
"""Represents an AI's request to call a tool.
Example:
```python
{"name": "foo", "args": {"a": 1}, "id": "123"}
```
This represents a request to call the tool named "foo" with arguments {"a": 1}
and an identifier of "123".
!!! note "Factory function"
`create_tool_call` may also be used as a factory to create a
`ToolCall`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["tool_call"]
"""Used for discrimination."""
id: str | None
"""An identifier associated with the tool call.
An identifier is needed to associate a tool call request with a tool
call result in events when multiple concurrent tool calls are made.
"""
# TODO: Consider making this NotRequired[str] in the future.
name: str
"""The name of the tool to be called."""
args: dict[str, Any]
"""The arguments to the tool call."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ToolCallChunk(TypedDict):
"""A chunk of a tool call (yielded when streaming).
When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
all string attributes are concatenated. Chunks are only merged if their
values of `index` are equal and not `None`.
Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
"""
# TODO: Consider making fields NotRequired[str] in the future.
type: Literal["tool_call_chunk"]
"""Used for serialization."""
id: str | None
"""An identifier associated with the tool call.
An identifier is needed to associate a tool call request with a tool
call result in events when multiple concurrent tool calls are made.
"""
# TODO: Consider making this NotRequired[str] in the future.
name: str | None
"""The name of the tool to be called."""
args: str | None
"""The arguments to the tool call."""
index: NotRequired[int | str]
"""The index of the tool call in a sequence."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class InvalidToolCall(TypedDict):
"""Allowance for errors made by LLM.
Here we add an `error` key to surface errors made during generation
(e.g., invalid JSON arguments.)
"""
# TODO: Consider making fields NotRequired[str] in the future.
type: Literal["invalid_tool_call"]
"""Used for discrimination."""
id: str | None
"""An identifier associated with the tool call.
An identifier is needed to associate a tool call request with a tool
call result in events when multiple concurrent tool calls are made.
"""
# TODO: Consider making this NotRequired[str] in the future.
name: str | None
"""The name of the tool to be called."""
args: str | None
"""The arguments to the tool call."""
error: str | None
"""An error message associated with the tool call."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ServerToolCall(TypedDict):
"""Tool call that is executed server-side.
For example: code execution, web search, etc.
"""
type: Literal["server_tool_call"]
"""Used for discrimination."""
id: str
"""An identifier associated with the tool call."""
name: str
"""The name of the tool to be called."""
args: dict[str, Any]
"""The arguments to the tool call."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ServerToolCallChunk(TypedDict):
"""A chunk of a server-side tool call (yielded when streaming)."""
type: Literal["server_tool_call_chunk"]
"""Used for discrimination."""
name: NotRequired[str]
"""The name of the tool to be called."""
args: NotRequired[str]
"""JSON substring of the arguments to the tool call."""
id: NotRequired[str]
"""Unique identifier for this server tool call chunk.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ServerToolResult(TypedDict):
"""Result of a server-side tool call."""
type: Literal["server_tool_result"]
"""Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this server tool result.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
tool_call_id: str
"""ID of the corresponding server tool call."""
status: Literal["success", "error"]
"""Execution status of the server-side tool."""
output: NotRequired[Any]
"""Output of the executed tool."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
class ReasoningContentBlock(TypedDict):
"""Reasoning output from a LLM.
!!! note "Factory function"
`create_reasoning_block` may also be used as a factory to create a
`ReasoningContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["reasoning"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
reasoning: NotRequired[str]
"""Reasoning text.
Either the thought summary or the raw reasoning text itself.
Often parsed from `<think>` tags in the model's response.
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
# Note: `title` and `context` are fields that could be used to provide additional
# information about the file, such as a description or summary of its content.
# E.g. with Claude, you can provide a context for a file which is passed to the model.
class ImageContentBlock(TypedDict):
"""Image data.
!!! note "Factory function"
`create_image_block` may also be used as a factory to create an
`ImageContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["image"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
file_id: NotRequired[str]
"""Reference to the image in an external file storage system.
For example, OpenAI or Anthropic's Files API.
"""
mime_type: NotRequired[str]
"""MIME type of the image.
Required for base64 data.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#image)
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
url: NotRequired[str]
"""URL of the image."""
base64: NotRequired[str]
"""Data as a base64 string."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata. This shouldn't be used for the image data itself."""
class VideoContentBlock(TypedDict):
"""Video data.
!!! note "Factory function"
`create_video_block` may also be used as a factory to create a
`VideoContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["video"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
file_id: NotRequired[str]
"""Reference to the video in an external file storage system.
For example, OpenAI or Anthropic's Files API.
"""
mime_type: NotRequired[str]
"""MIME type of the video.
Required for base64 data.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#video)
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
url: NotRequired[str]
"""URL of the video."""
base64: NotRequired[str]
"""Data as a base64 string."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata. This shouldn't be used for the video data itself."""
class AudioContentBlock(TypedDict):
"""Audio data.
!!! note "Factory function"
`create_audio_block` may also be used as a factory to create an
`AudioContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["audio"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
file_id: NotRequired[str]
"""Reference to the audio file in an external file storage system.
For example, OpenAI or Anthropic's Files API.
"""
mime_type: NotRequired[str]
"""MIME type of the audio.
Required for base64 data.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml#audio)
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
url: NotRequired[str]
"""URL of the audio."""
base64: NotRequired[str]
"""Data as a base64 string."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata. This shouldn't be used for the audio data itself."""
class PlainTextContentBlock(TypedDict):
"""Plaintext data (e.g., from a `.txt` or `.md` document).
!!! note
A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the
name has carried over, the structure has changed significantly. The only shared
keys between the old and new versions are `type` and `text`, though the
`type` value has changed from `'text'` to `'text-plain'`.
!!! note
Title and context are optional fields that may be passed to the model. See
Anthropic [example](https://platform.claude.com/docs/en/build-with-claude/citations#citable-vs-non-citable-content).
!!! note "Factory function"
`create_plaintext_block` may also be used as a factory to create a
`PlainTextContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["text-plain"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
file_id: NotRequired[str]
"""Reference to the plaintext file in an external file storage system.
For example, OpenAI or Anthropic's Files API.
"""
mime_type: Literal["text/plain"]
"""MIME type of the file.
Required for base64 data.
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
url: NotRequired[str]
"""URL of the plaintext."""
base64: NotRequired[str]
"""Data as a base64 string."""
text: NotRequired[str]
"""Plaintext content. This is optional if the data is provided as base64."""
title: NotRequired[str]
"""Title of the text data, e.g., the title of a document."""
context: NotRequired[str]
"""Context for the text, e.g., a description or summary of the text's content."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata. This shouldn't be used for the data itself."""
class FileContentBlock(TypedDict):
"""File data that doesn't fit into other multimodal block types.
This block is intended for files that are not images, audio, or plaintext. For
example, it can be used for PDFs, Word documents, etc.
If the file is an image, audio, or plaintext, you should use the corresponding
content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
`PlainTextContentBlock`).
!!! note "Factory function"
`create_file_block` may also be used as a factory to create a
`FileContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["file"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Used for tracking and referencing specific blocks (e.g., during streaming).
Not to be confused with `file_id`, which references an external file in a
storage system.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
file_id: NotRequired[str]
"""Reference to the file in an external file storage system.
For example, a file ID from OpenAI's Files API or another cloud storage provider.
This is distinct from `id`, which identifies the content block itself.
"""
mime_type: NotRequired[str]
"""MIME type of the file.
Required for base64 data.
[Examples from IANA](https://www.iana.org/assignments/media-types/media-types.xhtml)
"""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
url: NotRequired[str]
"""URL of the file."""
base64: NotRequired[str]
"""Data as a base64 string."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata. This shouldn't be used for the file data itself."""
# Future modalities to consider:
# - 3D models
# - Tabular data
class NonStandardContentBlock(TypedDict):
"""Provider-specific content data.
This block contains data for which there is not yet a standard type.
The purpose of this block should be to simply hold a provider-specific payload.
If a provider's non-standard output includes reasoning and tool calls, it should be
the adapter's job to parse that payload and emit the corresponding standard
`ReasoningContentBlock` and `ToolCalls`.
Has no `extras` field, as provider-specific data should be included in the
`value` field.
!!! note "Factory function"
`create_non_standard_block` may also be used as a factory to create a
`NonStandardContentBlock`. Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["non_standard"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Unique identifier for this content block.
Either:
- Generated by the provider
    - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)
"""
value: dict[str, Any]
"""Provider-specific content data."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
# --- Aliases ---
DataContentBlock = (
ImageContentBlock
| VideoContentBlock
| AudioContentBlock
| PlainTextContentBlock
| FileContentBlock
)
"""A union of all defined multimodal data `ContentBlock` types."""
ToolContentBlock = (
    ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
)
"""A union of all tool-related `ContentBlock` types."""
ContentBlock = (
TextContentBlock
| InvalidToolCall
| ReasoningContentBlock
| NonStandardContentBlock
| DataContentBlock
| ToolContentBlock
)
"""A union of all defined `ContentBlock` types and aliases."""
KNOWN_BLOCK_TYPES = {
# Text output
"text",
"reasoning",
# Tools
"tool_call",
"invalid_tool_call",
"tool_call_chunk",
# Multimodal data
"image",
"audio",
"file",
"text-plain",
"video",
# Server-side tool calls
"server_tool_call",
"server_tool_call_chunk",
"server_tool_result",
# Catch-all
"non_standard",
# citation and non_standard_annotation intentionally omitted
}
"""These are block types known to `langchain-core >= 1.0.0`.
If a block has a type not in this set, it is considered to be provider-specific.
"""
def _get_data_content_block_types() -> tuple[str, ...]:
"""Get type literals from DataContentBlock union members dynamically.
Example: ("image", "video", "audio", "text-plain", "file")
    Note that old-style multimodal block type literals overlap with new-style
    blocks. Specifically, "image", "audio", and "file".
See the docstring of `_normalize_messages` in `language_models._utils` for details.
"""
data_block_types = []
for block_type in get_args(DataContentBlock):
hints = get_type_hints(block_type)
if "type" in hints:
type_annotation = hints["type"]
if hasattr(type_annotation, "__args__"):
# This is a Literal type, get the literal value
literal_value = type_annotation.__args__[0]
data_block_types.append(literal_value)
return tuple(data_block_types)
def is_data_content_block(block: dict) -> bool:
"""Check if the provided content block is a data content block.
Returns True for both v0 (old-style) and v1 (new-style) multimodal data blocks.
Args:
block: The content block to check.
Returns:
`True` if the content block is a data content block, `False` otherwise.
"""
if block.get("type") not in _get_data_content_block_types():
return False
if any(key in block for key in ("url", "base64", "file_id", "text")):
# Type is valid and at least one data field is present
# (Accepts old-style image and audio URLContentBlock)
# 'text' is checked to support v0 PlainTextContentBlock types
# We must guard against new style TextContentBlock which also has 'text' `type`
# by ensuring the presence of `source_type`
if block["type"] == "text" and "source_type" not in block: # noqa: SIM103 # This is more readable
return False
return True
if "source_type" in block:
# Old-style content blocks had possible types of 'image', 'audio', and 'file'
# which is not captured in the prior check
source_type = block["source_type"]
if (source_type == "url" and "url" in block) or (
source_type == "base64" and "data" in block
):
return True
if (source_type == "id" and "id" in block) or (
source_type == "text" and "url" in block
):
return True
return False
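# Illustrative usage (sketch, not part of the original module):
#
#     is_data_content_block({"type": "image", "url": "https://example.com/cat.png"})
#     # -> True (v1-style data block)
#
#     is_data_content_block({"type": "image", "source_type": "base64", "data": "aGk="})
#     # -> True (v0-style data block)
#
#     is_data_content_block({"type": "text", "text": "Hello!"})
#     # -> False (standard text block, not multimodal data)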
def create_text_block(
text: str,
*,
id: str | None = None,
annotations: list[Annotation] | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> TextContentBlock:
"""Create a `TextContentBlock`.
Args:
text: The text content of the block.
id: Content block identifier.
Generated automatically if not provided.
annotations: `Citation`s and other annotations for the text.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `TextContentBlock`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = TextContentBlock(
type="text",
text=text,
id=ensure_id(id),
)
if annotations is not None:
block["annotations"] = annotations
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
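# Illustrative usage (sketch, not part of the original module; the "lc_..."
# ID is auto-generated by `ensure_id`):
#
#     create_text_block("Hello!", index=0)
#     # -> {"type": "text", "text": "Hello!", "id": "lc_...", "index": 0}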
def create_image_block(
*,
url: str | None = None,
base64: str | None = None,
file_id: str | None = None,
mime_type: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> ImageContentBlock:
"""Create an `ImageContentBlock`.
Args:
url: URL of the image.
base64: Base64-encoded image data.
file_id: ID of the image file from a file storage system.
mime_type: MIME type of the image.
Required for base64 data.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `ImageContentBlock`.
Raises:
ValueError: If no image source is provided or if `base64` is used without
`mime_type`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
    if not any([url, base64, file_id]):
        msg = "Must provide one of: url, base64, or file_id"
        raise ValueError(msg)
    if base64 and not mime_type:
        msg = "mime_type is required when using base64 data"
        raise ValueError(msg)
    block = ImageContentBlock(type="image", id=ensure_id(id))
if url is not None:
block["url"] = url
if base64 is not None:
block["base64"] = base64
if file_id is not None:
block["file_id"] = file_id
if mime_type is not None:
block["mime_type"] = mime_type
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
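# Illustrative usage (sketch, not part of the original module; the "lc_..."
# ID is auto-generated):
#
#     create_image_block(base64="aGk=", mime_type="image/png")
#     # -> {"type": "image", "id": "lc_...", "base64": "aGk=", "mime_type": "image/png"}
#
#     create_image_block()  # raises ValueError: must provide url, base64, or file_id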
def create_video_block(
*,
url: str | None = None,
base64: str | None = None,
file_id: str | None = None,
mime_type: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> VideoContentBlock:
"""Create a `VideoContentBlock`.
Args:
url: URL of the video.
base64: Base64-encoded video data.
file_id: ID of the video file from a file storage system.
mime_type: MIME type of the video.
Required for base64 data.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `VideoContentBlock`.
Raises:
ValueError: If no video source is provided or if `base64` is used without
`mime_type`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
msg = "Must provide one of: url, base64, or file_id"
raise ValueError(msg)
if base64 and not mime_type:
msg = "mime_type is required when using base64 data"
raise ValueError(msg)
block = VideoContentBlock(type="video", id=ensure_id(id))
if url is not None:
block["url"] = url
if base64 is not None:
block["base64"] = base64
if file_id is not None:
block["file_id"] = file_id
if mime_type is not None:
block["mime_type"] = mime_type
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
def create_audio_block(
*,
url: str | None = None,
base64: str | None = None,
file_id: str | None = None,
mime_type: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> AudioContentBlock:
"""Create an `AudioContentBlock`.
Args:
url: URL of the audio.
base64: Base64-encoded audio data.
file_id: ID of the audio file from a file storage system.
mime_type: MIME type of the audio.
Required for base64 data.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `AudioContentBlock`.
Raises:
ValueError: If no audio source is provided or if `base64` is used without
`mime_type`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
msg = "Must provide one of: url, base64, or file_id"
raise ValueError(msg)
if base64 and not mime_type:
msg = "mime_type is required when using base64 data"
raise ValueError(msg)
block = AudioContentBlock(type="audio", id=ensure_id(id))
if url is not None:
block["url"] = url
if base64 is not None:
block["base64"] = base64
if file_id is not None:
block["file_id"] = file_id
if mime_type is not None:
block["mime_type"] = mime_type
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
def create_file_block(
*,
url: str | None = None,
base64: str | None = None,
file_id: str | None = None,
mime_type: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> FileContentBlock:
"""Create a `FileContentBlock`.
Args:
url: URL of the file.
base64: Base64-encoded file data.
file_id: ID of the file from a file storage system.
mime_type: MIME type of the file.
Required for base64 data.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `FileContentBlock`.
Raises:
ValueError: If no file source is provided or if `base64` is used without
`mime_type`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
if not any([url, base64, file_id]):
msg = "Must provide one of: url, base64, or file_id"
raise ValueError(msg)
if base64 and not mime_type:
msg = "mime_type is required when using base64 data"
raise ValueError(msg)
block = FileContentBlock(type="file", id=ensure_id(id))
if url is not None:
block["url"] = url
if base64 is not None:
block["base64"] = base64
if file_id is not None:
block["file_id"] = file_id
if mime_type is not None:
block["mime_type"] = mime_type
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
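# --- Illustrative usage sketch, not part of the original module. The video,
# audio, and file factories above share one shape, so a single example covers
# all three: base64 payloads must be accompanied by a MIME type.
def _example_create_media_blocks() -> None:
    audio = create_audio_block(base64="<b64 bytes>", mime_type="audio/mpeg")
    assert audio["type"] == "audio"
    assert audio["mime_type"] == "audio/mpeg"
    # Omitting mime_type alongside base64 raises a ValueError.
    try:
        create_video_block(base64="<b64 bytes>")
    except ValueError as exc:
        assert "mime_type is required" in str(exc)
    # File-storage references need only an ID.
    file_block = create_file_block(file_id="file_abc123")
    assert file_block["file_id"] == "file_abc123"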
def create_plaintext_block(
text: str | None = None,
url: str | None = None,
base64: str | None = None,
file_id: str | None = None,
title: str | None = None,
context: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> PlainTextContentBlock:
"""Create a `PlainTextContentBlock`.
Args:
text: The plaintext content.
url: URL of the plaintext file.
base64: Base64-encoded plaintext data.
file_id: ID of the plaintext file from a file storage system.
title: Title of the text data.
context: Context or description of the text content.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `PlainTextContentBlock`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = PlainTextContentBlock(
type="text-plain",
mime_type="text/plain",
id=ensure_id(id),
)
if text is not None:
block["text"] = text
if url is not None:
block["url"] = url
if base64 is not None:
block["base64"] = base64
if file_id is not None:
block["file_id"] = file_id
if title is not None:
block["title"] = title
if context is not None:
block["context"] = context
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
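# --- Illustrative usage sketch, not part of the original module.
def _example_create_plaintext_block() -> None:
    # Unlike the media factories, no source is required: inline text suffices.
    block = create_plaintext_block("line 1\nline 2", title="notes.txt")
    assert block["type"] == "text-plain"
    assert block["mime_type"] == "text/plain"
    assert block["title"] == "notes.txt"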
def create_tool_call(
name: str,
args: dict[str, Any],
*,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> ToolCall:
"""Create a `ToolCall`.
Args:
name: The name of the tool to be called.
args: The arguments to the tool call.
id: An identifier for the tool call.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `ToolCall`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = ToolCall(
type="tool_call",
name=name,
args=args,
id=ensure_id(id),
)
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
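# --- Illustrative usage sketch, not part of the original module.
def _example_create_tool_call() -> None:
    call = create_tool_call("get_weather", {"location": "San Francisco"})
    assert call["type"] == "tool_call"
    assert call["name"] == "get_weather"
    assert call["args"] == {"location": "San Francisco"}
    assert call["id"] is not None and call["id"].startswith("lc_")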
def create_reasoning_block(
reasoning: str | None = None,
id: str | None = None,
index: int | str | None = None,
**kwargs: Any,
) -> ReasoningContentBlock:
"""Create a `ReasoningContentBlock`.
Args:
reasoning: The reasoning text or thought summary.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `ReasoningContentBlock`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = ReasoningContentBlock(
type="reasoning",
reasoning=reasoning or "",
id=ensure_id(id),
)
if index is not None:
block["index"] = index
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
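# --- Illustrative usage sketch, not part of the original module.
def _example_create_reasoning_block() -> None:
    block = create_reasoning_block("Comparing both options step by step.")
    assert block["type"] == "reasoning"
    assert block["reasoning"].startswith("Comparing")
    # A missing reasoning string is normalized to "" rather than left as None.
    assert create_reasoning_block()["reasoning"] == ""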
def create_citation(
*,
url: str | None = None,
title: str | None = None,
start_index: int | None = None,
end_index: int | None = None,
cited_text: str | None = None,
id: str | None = None,
**kwargs: Any,
) -> Citation:
"""Create a `Citation`.
Args:
url: URL of the document source.
title: Source document title.
start_index: Start index in the response text where citation applies.
end_index: End index in the response text where citation applies.
cited_text: Excerpt of source text being cited.
id: Content block identifier.
Generated automatically if not provided.
Returns:
A properly formatted `Citation`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = Citation(type="citation", id=ensure_id(id))
if url is not None:
block["url"] = url
if title is not None:
block["title"] = title
if start_index is not None:
block["start_index"] = start_index
if end_index is not None:
block["end_index"] = end_index
if cited_text is not None:
block["cited_text"] = cited_text
extras = {k: v for k, v in kwargs.items() if v is not None}
if extras:
block["extras"] = extras
return block
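# --- Illustrative usage sketch, not part of the original module: a citation
# attached to a text block via the `annotations` parameter of the text factory.
def _example_create_citation() -> None:
    citation = create_citation(
        url="https://example.com/source",
        cited_text="The weather is sunny.",
        start_index=0,
        end_index=21,
    )
    assert citation["type"] == "citation"
    block = create_text_block("It's sunny.", annotations=[citation])
    assert block["annotations"] == [citation]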
def create_non_standard_block(
value: dict[str, Any],
*,
id: str | None = None,
index: int | str | None = None,
) -> NonStandardContentBlock:
"""Create a `NonStandardContentBlock`.
Args:
value: Provider-specific content data.
id: Content block identifier.
Generated automatically if not provided.
index: Index of block in aggregate response.
Used during streaming.
Returns:
A properly formatted `NonStandardContentBlock`.
!!! note
The `id` is generated automatically if not provided, using a UUID4 format
prefixed with `'lc_'` to indicate it is a LangChain-generated ID.
"""
block = NonStandardContentBlock(
type="non_standard",
value=value,
id=ensure_id(id),
)
if index is not None:
block["index"] = index
return block
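# --- Illustrative usage sketch, not part of the original module.
def _example_create_non_standard_block() -> None:
    # Provider-specific payloads with no standard block type are wrapped
    # verbatim under 'value'.
    block = create_non_standard_block({"type": "something_else", "foo": "bar"})
    assert block["type"] == "non_standard"
    assert block["value"] == {"type": "something_else", "foo": "bar"}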
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/messages/content.py",
"license": "MIT License",
"lines": 1074,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_anthropic.py | from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from langchain_core.messages import content as types
def test_convert_to_v1_from_anthropic() -> None:
message = AIMessage(
[
{"type": "thinking", "thinking": "foo", "signature": "foo_signature"},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_use",
"id": "abc_123",
"name": "get_weather",
"input": {"location": "San Francisco"},
},
{
"type": "tool_use",
"id": "abc_234",
"name": "get_weather_programmatic",
"input": {"location": "Boston"},
"caller": {
"type": "code_execution_20250825",
"tool_id": "srvtoolu_abc234",
},
},
{
"type": "text",
"text": "It's sunny.",
"citations": [
{
"type": "search_result_location",
"cited_text": "The weather is sunny.",
"source": "source_123",
"title": "Document Title",
"search_result_index": 1,
"start_block_index": 0,
"end_block_index": 2,
},
{"bar": "baz"},
],
},
{
"type": "server_tool_use",
"name": "web_search",
"input": {"query": "web search query"},
"id": "srvtoolu_abc123",
},
{
"type": "web_search_tool_result",
"tool_use_id": "srvtoolu_abc123",
"content": [
{
"type": "web_search_result",
"title": "Page Title 1",
"url": "<page url 1>",
"page_age": "January 1, 2025",
"encrypted_content": "<encrypted content 1>",
},
{
"type": "web_search_result",
"title": "Page Title 2",
"url": "<page url 2>",
"page_age": "January 2, 2025",
"encrypted_content": "<encrypted content 2>",
},
],
},
{
"type": "server_tool_use",
"id": "srvtoolu_def456",
"name": "code_execution",
"input": {"code": "import numpy as np..."},
},
{
"type": "code_execution_tool_result",
"tool_use_id": "srvtoolu_def456",
"content": {
"type": "code_execution_result",
"stdout": "Mean: 5.5\nStandard deviation...",
"stderr": "",
"return_code": 0,
},
},
{"type": "something_else", "foo": "bar"},
],
response_metadata={"model_provider": "anthropic"},
)
expected_content: list[types.ContentBlock] = [
{
"type": "reasoning",
"reasoning": "foo",
"extras": {"signature": "foo_signature"},
},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "tool_call",
"id": "abc_234",
"name": "get_weather_programmatic",
"args": {"location": "Boston"},
"extras": {
"caller": {
"type": "code_execution_20250825",
"tool_id": "srvtoolu_abc234",
}
},
},
{
"type": "text",
"text": "It's sunny.",
"annotations": [
{
"type": "citation",
"title": "Document Title",
"cited_text": "The weather is sunny.",
"extras": {
"source": "source_123",
"search_result_index": 1,
"start_block_index": 0,
"end_block_index": 2,
},
},
{"type": "non_standard_annotation", "value": {"bar": "baz"}},
],
},
{
"type": "server_tool_call",
"name": "web_search",
"id": "srvtoolu_abc123",
"args": {"query": "web search query"},
},
{
"type": "server_tool_result",
"tool_call_id": "srvtoolu_abc123",
"output": [
{
"type": "web_search_result",
"title": "Page Title 1",
"url": "<page url 1>",
"page_age": "January 1, 2025",
"encrypted_content": "<encrypted content 1>",
},
{
"type": "web_search_result",
"title": "Page Title 2",
"url": "<page url 2>",
"page_age": "January 2, 2025",
"encrypted_content": "<encrypted content 2>",
},
],
"status": "success",
"extras": {"block_type": "web_search_tool_result"},
},
{
"type": "server_tool_call",
"name": "code_interpreter",
"id": "srvtoolu_def456",
"args": {"code": "import numpy as np..."},
},
{
"type": "server_tool_result",
"tool_call_id": "srvtoolu_def456",
"output": {
"type": "code_execution_result",
"return_code": 0,
"stdout": "Mean: 5.5\nStandard deviation...",
"stderr": "",
},
"status": "success",
"extras": {"block_type": "code_execution_tool_result"},
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
]
assert message.content_blocks == expected_content
# Check no mutation
assert message.content != expected_content
message = AIMessage("Hello", response_metadata={"model_provider": "anthropic"})
expected_content = [{"type": "text", "text": "Hello"}]
assert message.content_blocks == expected_content
assert message.content != expected_content # check no mutation
def test_convert_to_v1_from_anthropic_chunk() -> None:
chunks = [
AIMessageChunk(
content=[{"text": "Looking ", "type": "text", "index": 0}],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[{"text": "now.", "type": "text", "index": 0}],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[
{
"type": "tool_use",
"name": "get_weather",
"input": {},
"id": "toolu_abc123",
"index": 1,
}
],
tool_call_chunks=[
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
}
],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[{"type": "input_json_delta", "partial_json": "", "index": 1}],
tool_call_chunks=[
{
"name": None,
"args": "",
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": '{"loca', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'tion": "San ', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "anthropic"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'Francisco"}', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "anthropic"},
),
]
expected_contents: list[types.ContentBlock] = [
{"type": "text", "text": "Looking ", "index": 0},
{"type": "text", "text": "now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
},
{"name": None, "args": "", "id": None, "index": 1, "type": "tool_call_chunk"},
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
]
for chunk, expected in zip(chunks, expected_contents, strict=False):
assert chunk.content_blocks == [expected]
full: AIMessageChunk | None = None
for chunk in chunks:
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
expected_content = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_use",
"name": "get_weather",
"partial_json": '{"location": "San Francisco"}',
"input": {},
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content == expected_content
expected_content_blocks = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": '{"location": "San Francisco"}',
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content_blocks == expected_content_blocks
# Test parse partial json
full = AIMessageChunk(
content=[
{
"id": "srvtoolu_abc123",
"input": {},
"name": "web_fetch",
"type": "server_tool_use",
"index": 0,
"partial_json": '{"url": "https://docs.langchain.com"}',
},
{
"id": "mcptoolu_abc123",
"input": {},
"name": "ask_question",
"server_name": "<my server name>",
"type": "mcp_tool_use",
"index": 1,
"partial_json": '{"repoName": "<my repo>", "question": "<my query>"}',
},
],
response_metadata={"model_provider": "anthropic"},
chunk_position="last",
)
expected_content_blocks = [
{
"type": "server_tool_call",
"name": "web_fetch",
"id": "srvtoolu_abc123",
"args": {"url": "https://docs.langchain.com"},
"index": 0,
},
{
"type": "server_tool_call",
"name": "remote_mcp",
"id": "mcptoolu_abc123",
"args": {"repoName": "<my repo>", "question": "<my query>"},
"extras": {"tool_name": "ask_question", "server_name": "<my server name>"},
"index": 1,
},
]
assert full.content_blocks == expected_content_blocks
def test_convert_to_v1_from_anthropic_input() -> None:
message = HumanMessage(
[
{"type": "text", "text": "foo"},
{
"type": "document",
"source": {
"type": "base64",
"data": "<base64 data>",
"media_type": "application/pdf",
},
},
{
"type": "document",
"source": {
"type": "url",
"url": "<document url>",
},
},
{
"type": "document",
"source": {
"type": "content",
"content": [
{"type": "text", "text": "The grass is green"},
{"type": "text", "text": "The sky is blue"},
],
},
"citations": {"enabled": True},
},
{
"type": "document",
"source": {
"type": "text",
"data": "<plain text data>",
"media_type": "text/plain",
},
},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "<base64 image data>",
},
},
{
"type": "image",
"source": {
"type": "url",
"url": "<image url>",
},
},
{
"type": "image",
"source": {
"type": "file",
"file_id": "<image file id>",
},
},
{
"type": "document",
"source": {"type": "file", "file_id": "<pdf file id>"},
},
]
)
expected: list[types.ContentBlock] = [
{"type": "text", "text": "foo"},
{
"type": "file",
"base64": "<base64 data>",
"mime_type": "application/pdf",
},
{
"type": "file",
"url": "<document url>",
},
{
"type": "non_standard",
"value": {
"type": "document",
"source": {
"type": "content",
"content": [
{"type": "text", "text": "The grass is green"},
{"type": "text", "text": "The sky is blue"},
],
},
"citations": {"enabled": True},
},
},
{
"type": "text-plain",
"text": "<plain text data>",
"mime_type": "text/plain",
},
{
"type": "image",
"base64": "<base64 image data>",
"mime_type": "image/jpeg",
},
{
"type": "image",
"url": "<image url>",
},
{
"type": "image",
"id": "<image file id>",
},
{
"type": "file",
"id": "<pdf file id>",
},
]
assert message.content_blocks == expected
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_anthropic.py",
"license": "MIT License",
"lines": 495,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_bedrock.py | from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from langchain_core.messages import content as types
def test_convert_to_v1_from_bedrock() -> None:
message = AIMessage(
[
{"type": "thinking", "thinking": "foo", "signature": "foo_signature"},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_use",
"id": "abc_123",
"name": "get_weather",
"input": {"location": "San Francisco"},
},
{
"type": "text",
"text": "It's sunny.",
"citations": [
{
"type": "search_result_location",
"cited_text": "The weather is sunny.",
"source": "source_123",
"title": "Document Title",
"search_result_index": 1,
"start_block_index": 0,
"end_block_index": 2,
},
{"bar": "baz"},
],
},
{"type": "something_else", "foo": "bar"},
],
tool_calls=[
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "tool_call",
"id": "abc_234",
"name": "another_tool",
"args": {"arg_1": "value_1"},
},
],
response_metadata={
"model_provider": "bedrock",
"model_name": "us.anthropic.claude-sonnet-4-20250514-v1:0",
},
)
expected_content: list[types.ContentBlock] = [
{
"type": "reasoning",
"reasoning": "foo",
"extras": {"signature": "foo_signature"},
},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "text",
"text": "It's sunny.",
"annotations": [
{
"type": "citation",
"title": "Document Title",
"cited_text": "The weather is sunny.",
"extras": {
"source": "source_123",
"search_result_index": 1,
"start_block_index": 0,
"end_block_index": 2,
},
},
{"type": "non_standard_annotation", "value": {"bar": "baz"}},
],
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
{
"type": "tool_call",
"id": "abc_234",
"name": "another_tool",
"args": {"arg_1": "value_1"},
},
]
assert message.content_blocks == expected_content
# Check no mutation
assert message.content != expected_content
# Test with a non-Anthropic message
message = AIMessage(
[
{"type": "text", "text": "Let's call a tool."},
{"type": "something_else", "foo": "bar"},
],
tool_calls=[
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
response_metadata={"model_provider": "bedrock"},
)
expected_content = [
{"type": "text", "text": "Let's call a tool."},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
]
assert message.content_blocks == expected_content
def test_convert_to_v1_from_bedrock_chunk() -> None:
chunks = [
AIMessageChunk(
content=[{"text": "Looking ", "type": "text", "index": 0}],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[{"text": "now.", "type": "text", "index": 0}],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[
{
"type": "tool_use",
"name": "get_weather",
"input": {},
"id": "toolu_abc123",
"index": 1,
}
],
tool_call_chunks=[
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
}
],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[{"type": "input_json_delta", "partial_json": "", "index": 1}],
tool_call_chunks=[
{
"name": None,
"args": "",
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": '{"loca', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'tion": "San ', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'Francisco"}', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock"},
),
]
expected_contents: list[types.ContentBlock] = [
{"type": "text", "text": "Looking ", "index": 0},
{"type": "text", "text": "now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
},
{"name": None, "args": "", "id": None, "index": 1, "type": "tool_call_chunk"},
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
]
for chunk, expected in zip(chunks, expected_contents, strict=False):
assert chunk.content_blocks == [expected]
full: AIMessageChunk | None = None
for chunk in chunks:
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
expected_content = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_use",
"name": "get_weather",
"partial_json": '{"location": "San Francisco"}',
"input": {},
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content == expected_content
expected_content_blocks = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": '{"location": "San Francisco"}',
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content_blocks == expected_content_blocks
def test_convert_to_v1_from_bedrock_input() -> None:
message = HumanMessage(
[
{"type": "text", "text": "foo"},
{
"type": "document",
"source": {
"type": "base64",
"data": "<base64 data>",
"media_type": "application/pdf",
},
},
{
"type": "document",
"source": {
"type": "url",
"url": "<document url>",
},
},
{
"type": "document",
"source": {
"type": "content",
"content": [
{"type": "text", "text": "The grass is green"},
{"type": "text", "text": "The sky is blue"},
],
},
"citations": {"enabled": True},
},
{
"type": "document",
"source": {
"type": "text",
"data": "<plain text data>",
"media_type": "text/plain",
},
},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "<base64 image data>",
},
},
{
"type": "image",
"source": {
"type": "url",
"url": "<image url>",
},
},
{
"type": "image",
"source": {
"type": "file",
"file_id": "<image file id>",
},
},
{
"type": "document",
"source": {"type": "file", "file_id": "<pdf file id>"},
},
]
)
expected: list[types.ContentBlock] = [
{"type": "text", "text": "foo"},
{
"type": "file",
"base64": "<base64 data>",
"mime_type": "application/pdf",
},
{
"type": "file",
"url": "<document url>",
},
{
"type": "non_standard",
"value": {
"type": "document",
"source": {
"type": "content",
"content": [
{"type": "text", "text": "The grass is green"},
{"type": "text", "text": "The sky is blue"},
],
},
"citations": {"enabled": True},
},
},
{
"type": "text-plain",
"text": "<plain text data>",
"mime_type": "text/plain",
},
{
"type": "image",
"base64": "<base64 image data>",
"mime_type": "image/jpeg",
},
{
"type": "image",
"url": "<image url>",
},
{
"type": "image",
"id": "<image file id>",
},
{
"type": "file",
"id": "<pdf file id>",
},
]
assert message.content_blocks == expected
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_bedrock.py",
"license": "MIT License",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |