# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/slack/__init__.py
"""Slack toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/xorbits/__init__.py
from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_xorbits_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
from typing import Dict, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain_community.tools.jira.tool import JiraAction
from langchain_community.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
See https://python.langchain.com/docs/security for more information.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
"""Create a JiraToolkit from a JiraAPIWrapper.
Args:
jira_api_wrapper: JiraAPIWrapper. The Jira API wrapper.
Returns:
JiraToolkit. The Jira toolkit.
"""
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
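
# Illustrative usage sketch (not from the repository). Assumes Jira credentials
# are already configured in the environment variables that JiraAPIWrapper reads
# (e.g. JIRA_API_TOKEN, JIRA_USERNAME, JIRA_INSTANCE_URL).
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper

jira_toolkit = JiraToolkit.from_jira_api_wrapper(JiraAPIWrapper())
# One JiraAction tool per operation defined above ("JQL Query", "Get Projects", ...).
print([tool.name for tool in jira_toolkit.get_tools()])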

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/jira/__init__.py
"""Jira Toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/spark_sql/base.py
"""Spark SQL agent."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_community.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
if TYPE_CHECKING:
from langchain.agents.agent import AgentExecutor
def create_spark_sql_agent(
llm: BaseLanguageModel,
toolkit: SparkSQLToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
callbacks: Callbacks = None,
prefix: str = SQL_PREFIX,
suffix: str = SQL_SUFFIX,
format_instructions: Optional[str] = None,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a Spark SQL agent from an LLM and tools.
Args:
llm: The language model to use.
toolkit: The Spark SQL toolkit.
callback_manager: Optional. The callback manager. Default is None.
callbacks: Optional. The callbacks. Default is None.
prefix: Optional. The prefix for the prompt. Default is SQL_PREFIX.
suffix: Optional. The suffix for the prompt. Default is SQL_SUFFIX.
format_instructions: Optional. The format instructions for the prompt.
Default is None.
input_variables: Optional. The input variables for the prompt. Default is None.
top_k: Optional. The top k for the prompt. Default is 10.
max_iterations: Optional. The maximum iterations to run. Default is 15.
max_execution_time: Optional. The maximum execution time. Default is None.
early_stopping_method: Optional. The early stopping method. Default is "force".
verbose: Optional. Whether to print verbose output. Default is False.
agent_executor_kwargs: Optional. The agent executor kwargs. Default is None.
kwargs: Any. Additional keyword arguments.
Returns:
The agent executor.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prefix = prefix.format(top_k=top_k)
prompt_params = (
{"format_instructions": format_instructions}
if format_instructions is not None
else {}
)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
**prompt_params,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
callbacks=callbacks,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
callbacks=callbacks,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
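
# Illustrative usage sketch (not from the repository). Assumes an active
# SparkSession, the `langchain`, `langchain-openai`, and `pyspark` packages, and
# an OpenAI API key; the model name is only a placeholder.
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_community.utilities.spark_sql import SparkSQL
from langchain_openai import ChatOpenAI

spark_sql = SparkSQL(schema="default")  # wraps the active (or a new) SparkSession
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
toolkit = SparkSQLToolkit(db=spark_sql, llm=llm)
agent_executor = create_spark_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.invoke({"input": "How many tables are in the default schema?"})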

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/spark_sql/toolkit.py
"""Toolkit for interacting with Spark SQL."""
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import ConfigDict, Field
from langchain_community.tools.spark_sql.tool import (
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
from langchain_community.utilities.spark_sql import SparkSQL
class SparkSQLToolkit(BaseToolkit):
"""Toolkit for interacting with Spark SQL.
Parameters:
db: SparkSQL. The Spark SQL database.
llm: BaseLanguageModel. The language model.
"""
db: SparkSQL = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QuerySparkSQLTool(db=self.db),
InfoSparkSQLTool(db=self.db),
ListSparkSQLTool(db=self.db),
QueryCheckerTool(db=self.db, llm=self.llm),
]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/spark_sql/__init__.py
"""Spark SQL agent."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/spark_sql/prompt.py
# flake8: noqa
SQL_PREFIX = """You are an agent designed to interact with Spark SQL.
Given an input question, create a syntactically correct Spark SQL query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
"""
SQL_SUFFIX = """Begin!
Question: {input}
Thought: I should look at the tables in the database to see what I can query.
{agent_scratchpad}"""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/connery/toolkit.py
from typing import Any, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import model_validator
from langchain_community.tools.connery import ConneryService
class ConneryToolkit(BaseToolkit):
"""
Toolkit with a list of Connery Actions as tools.
Parameters:
tools (List[BaseTool]): The list of Connery Actions.
"""
tools: List[BaseTool]
def get_tools(self) -> List[BaseTool]:
"""
Returns the list of Connery Actions.
"""
return self.tools
@model_validator(mode="before")
@classmethod
def validate_attributes(cls, values: dict) -> Any:
"""
Validate the attributes of the ConneryToolkit class.
Args:
values (dict): The arguments to validate.
Returns:
dict: The validated arguments.
Raises:
ValueError: If the 'tools' attribute is not set
"""
if not values.get("tools"):
raise ValueError("The attribute 'tools' must be set.")
return values
@classmethod
def create_instance(cls, connery_service: ConneryService) -> "ConneryToolkit":
"""
Creates a Connery Toolkit using a Connery Service.
Parameters:
connery_service (ConneryService): The Connery Service
to get the list of Connery Actions.
Returns:
ConneryToolkit: The Connery Toolkit.
"""
instance = cls(tools=connery_service.list_actions()) # type: ignore[arg-type]
return instance
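
# Illustrative usage sketch (not from the repository). Assumes a reachable
# Connery runner; ConneryService reads the runner URL and API key from
# environment variables (e.g. CONNERY_RUNNER_URL and CONNERY_RUNNER_API_KEY).
from langchain_community.agent_toolkits.connery import ConneryToolkit
from langchain_community.tools.connery import ConneryService

connery_toolkit = ConneryToolkit.create_instance(ConneryService())
# Each tool corresponds to one Connery Action exposed by the runner.
print([tool.name for tool in connery_toolkit.get_tools()])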

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/connery/__init__.py
"""
This module contains the ConneryToolkit.
"""
from .toolkit import ConneryToolkit
__all__ = ["ConneryToolkit"]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/file_management/toolkit.py
from __future__ import annotations
from typing import Any, Dict, List, Optional, Type
from langchain_core.tools import BaseTool, BaseToolkit
from langchain_core.utils.pydantic import get_fields
from pydantic import model_validator
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain_community.tools.file_management.delete import DeleteFileTool
from langchain_community.tools.file_management.file_search import FileSearchTool
from langchain_community.tools.file_management.list_dir import ListDirectoryTool
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
_FILE_TOOLS: List[Type[BaseTool]] = [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
_FILE_TOOLS_MAP: Dict[str, Type[BaseTool]] = {
get_fields(tool_cls)["name"].default: tool_cls for tool_cls in _FILE_TOOLS
}
class FileManagementToolkit(BaseToolkit):
"""Toolkit for interacting with local files.
*Security Notice*: This toolkit provides methods to interact with local files.
If providing this toolkit to an agent on an LLM, ensure you scope
the agent's permissions to only include the necessary permissions
to perform the desired operations.
By **default** the agent will have access to all files within
the root dir and will be able to Copy, Delete, Move, Read, Write
and List files in that directory.
Consider the following:
- Limit access to particular directories using `root_dir`.
- Use filesystem permissions to restrict access and permissions to only
the files and directories required by the agent.
- Limit the tools available to the agent to only the file operations
necessary for the agent's intended use.
- Sandbox the agent by running it in a container.
See https://python.langchain.com/docs/security for more information.
Parameters:
root_dir: Optional. The root directory to perform file operations.
If not provided, file operations are performed relative to the current
working directory.
selected_tools: Optional. The tools to include in the toolkit. If not
provided, all tools are included.
"""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
selected_tools: Optional[List[str]] = None
"""If provided, only provide the selected tools. Defaults to all."""
@model_validator(mode="before")
@classmethod
def validate_tools(cls, values: dict) -> Any:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS_MAP:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS_MAP)}"
)
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS_MAP
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS_MAP[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore[call-arg]
return tools
__all__ = ["FileManagementToolkit"]
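
# Illustrative usage sketch (not from the repository). Scopes the toolkit to a
# temporary directory and to three of the seven available file tools.
from tempfile import TemporaryDirectory

from langchain_community.agent_toolkits.file_management import FileManagementToolkit

working_directory = TemporaryDirectory()
fm_toolkit = FileManagementToolkit(
    root_dir=working_directory.name,
    selected_tools=["read_file", "write_file", "list_directory"],
)
print([tool.name for tool in fm_toolkit.get_tools()])
# -> ['read_file', 'write_file', 'list_directory']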

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/file_management/__init__.py
"""Local file management toolkit."""
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
__all__ = ["FileManagementToolkit"]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/cassandra_database/toolkit.py
"""Apache Cassandra Toolkit."""
from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import ConfigDict, Field
from langchain_community.tools.cassandra_database.tool import (
GetSchemaCassandraDatabaseTool,
GetTableDataCassandraDatabaseTool,
QueryCassandraDatabaseTool,
)
from langchain_community.utilities.cassandra_database import CassandraDatabase
class CassandraDatabaseToolkit(BaseToolkit):
"""Toolkit for interacting with an Apache Cassandra database.
Parameters:
db: CassandraDatabase. The Cassandra database to interact
with.
"""
db: CassandraDatabase = Field(exclude=True)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
GetSchemaCassandraDatabaseTool(db=self.db),
QueryCassandraDatabaseTool(db=self.db),
GetTableDataCassandraDatabaseTool(db=self.db),
]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/cassandra_database/__init__.py
"""Apache Cassandra Toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/amadeus/toolkit.py
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import ConfigDict, Field
from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch
from langchain_community.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusToolkit(BaseToolkit):
"""Toolkit for interacting with Amadeus which offers APIs for travel.
Parameters:
client: Optional. The Amadeus client. Default is None.
llm: Optional. The language model to use. Default is None.
"""
client: Client = Field(default_factory=authenticate)
llm: Optional[BaseLanguageModel] = Field(default=None)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
AmadeusClosestAirport(llm=self.llm),
AmadeusFlightSearch(),
]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/office365/toolkit.py
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import ConfigDict, Field
from langchain_community.tools.office365.create_draft_message import (
O365CreateDraftMessage,
)
from langchain_community.tools.office365.events_search import O365SearchEvents
from langchain_community.tools.office365.messages_search import O365SearchEmails
from langchain_community.tools.office365.send_event import O365SendEvent
from langchain_community.tools.office365.send_message import O365SendMessage
from langchain_community.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365Toolkit(BaseToolkit):
"""Toolkit for interacting with Office 365.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
For example, this toolkit can be used to search through emails and events,
send messages and event invites, and create draft messages.
Please make sure that the permissions given by this toolkit
are appropriate for your use case.
See https://python.langchain.com/docs/security for more information.
Parameters:
account: Optional. The Office 365 account. Default is None.
"""
account: Account = Field(default_factory=authenticate)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
O365SearchEvents(),
O365CreateDraftMessage(),
O365SearchEmails(),
O365SendEvent(),
O365SendMessage(),
]

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/office365/__init__.py
"""Office365 toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/nasa/toolkit.py
from typing import Dict, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.nasa.prompt import (
NASA_CAPTIONS_PROMPT,
NASA_MANIFEST_PROMPT,
NASA_METADATA_PROMPT,
NASA_SEARCH_PROMPT,
)
from langchain_community.tools.nasa.tool import NasaAction
from langchain_community.utilities.nasa import NasaAPIWrapper
class NasaToolkit(BaseToolkit):
"""Nasa Toolkit.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_nasa_api_wrapper(cls, nasa_api_wrapper: NasaAPIWrapper) -> "NasaToolkit":
operations: List[Dict] = [
{
"mode": "search_media",
"name": "Search NASA Image and Video Library media",
"description": NASA_SEARCH_PROMPT,
},
{
"mode": "get_media_metadata_manifest",
"name": "Get NASA Image and Video Library media metadata manifest",
"description": NASA_MANIFEST_PROMPT,
},
{
"mode": "get_media_metadata_location",
"name": "Get NASA Image and Video Library media metadata location",
"description": NASA_METADATA_PROMPT,
},
{
"mode": "get_video_captions_location",
"name": "Get NASA Image and Video Library video captions location",
"description": NASA_CAPTIONS_PROMPT,
},
]
tools = [
NasaAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=nasa_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
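
# Illustrative usage sketch (not from the repository). NasaAPIWrapper targets
# NASA's public Image and Video Library API, so no API key is assumed here.
from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit
from langchain_community.utilities.nasa import NasaAPIWrapper

nasa_toolkit = NasaToolkit.from_nasa_api_wrapper(NasaAPIWrapper())
print([tool.name for tool in nasa_toolkit.get_tools()])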

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/nasa/__init__.py
"""NASA Toolkit"""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/steam/toolkit.py
"""Steam Toolkit."""
from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.steam.prompt import (
STEAM_GET_GAMES_DETAILS,
STEAM_GET_RECOMMENDED_GAMES,
)
from langchain_community.tools.steam.tool import SteamWebAPIQueryRun
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamToolkit(BaseToolkit):
"""Steam Toolkit.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_steam_api_wrapper(
cls, steam_api_wrapper: SteamWebAPIWrapper
) -> "SteamToolkit":
"""Create a Steam Toolkit from a Steam API Wrapper.
Args:
steam_api_wrapper: SteamWebAPIWrapper. The Steam API Wrapper.
Returns:
SteamToolkit. The Steam Toolkit.
"""
operations: List[dict] = [
{
"mode": "get_games_details",
"name": "Get Games Details",
"description": STEAM_GET_GAMES_DETAILS,
},
{
"mode": "get_recommended_games",
"name": "Get Recommended Games",
"description": STEAM_GET_RECOMMENDED_GAMES,
},
]
tools = [
SteamWebAPIQueryRun(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=steam_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/steam/__init__.py
"""Steam Toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/zapier/toolkit.py
"""[DEPRECATED] Zapier Toolkit."""
from typing import List
from langchain_core._api import warn_deprecated
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.zapier.tool import ZapierNLARunAction
from langchain_community.utilities.zapier import ZapierNLAWrapper
class ZapierToolkit(BaseToolkit):
"""Zapier Toolkit.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper.
Args:
zapier_nla_wrapper: ZapierNLAWrapper. The Zapier NLA wrapper.
Returns:
ZapierToolkit. The Zapier toolkit.
"""
actions = zapier_nla_wrapper.list()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools) # type: ignore[arg-type]
@classmethod
async def async_from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Async create a toolkit from a ZapierNLAWrapper.
Args:
zapier_nla_wrapper: ZapierNLAWrapper. The Zapier NLA wrapper.
Returns:
ZapierToolkit. The Zapier toolkit.
"""
actions = await zapier_nla_wrapper.alist()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
warn_deprecated(
since="0.0.319",
message=(
"This tool will be deprecated on 2023-11-17. See "
"<https://nla.zapier.com/sunset/> for details"
),
)
return self.tools

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/zapier/__init__.py
"""Zapier Toolkit."""

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/nla/toolkit.py
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from pydantic import Field
from langchain_community.agent_toolkits.nla.tool import NLATool
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.tools.plugin import AIPlugin
from langchain_community.utilities.requests import Requests
class NLAToolkit(BaseToolkit):
"""Natural Language API Toolkit.
*Security Note*: This toolkit creates tools that enable making calls
to an Open API compliant API.
The tools created by this toolkit may be able to make GET, POST,
PATCH, PUT, DELETE requests to any of the exposed endpoints on
the API.
Control access to who can use this toolkit.
See https://python.langchain.com/docs/security for more information.
"""
nla_tools: Sequence[NLATool] = Field(...)
"""List of API Endpoint Tools."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools for all the API operations."""
return list(self.nla_tools)
@staticmethod
def _get_http_operation_tools(
llm: BaseLanguageModel,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> List[NLATool]:
"""Get the tools for all the API operations."""
if not spec.paths:
return []
http_operation_tools = []
for path in spec.paths:
for method in spec.get_methods_for_path(path):
endpoint_tool = NLATool.from_llm_and_method(
llm=llm,
path=path,
method=method,
spec=spec,
requests=requests,
verbose=verbose,
**kwargs,
)
http_operation_tools.append(endpoint_tool)
return http_operation_tools
@classmethod
def from_llm_and_spec(
cls,
llm: BaseLanguageModel,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit by creating tools for each operation.
Args:
llm: The language model to use.
spec: The OpenAPI spec.
requests: Optional requests object. Default is None.
verbose: Whether to print verbose output. Default is False.
kwargs: Additional arguments.
Returns:
The toolkit.
"""
http_operation_tools = cls._get_http_operation_tools(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
return cls(nla_tools=http_operation_tools)
@classmethod
def from_llm_and_url(
cls,
llm: BaseLanguageModel,
open_api_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL.
Args:
llm: The language model to use.
open_api_url: The URL of the OpenAPI spec.
requests: Optional requests object. Default is None.
verbose: Whether to print verbose output. Default is False.
kwargs: Additional arguments.
Returns:
The toolkit.
"""
spec = OpenAPISpec.from_url(open_api_url)
return cls.from_llm_and_spec(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
@classmethod
def from_llm_and_ai_plugin(
cls,
llm: BaseLanguageModel,
ai_plugin: AIPlugin,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(ai_plugin.api.url)
# TODO: Merge optional Auth information with the `requests` argument
return cls.from_llm_and_spec(
llm=llm,
spec=spec,
requests=requests,
verbose=verbose,
**kwargs,
)
@classmethod
def from_llm_and_ai_plugin_url(
cls,
llm: BaseLanguageModel,
ai_plugin_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
plugin = AIPlugin.from_url(ai_plugin_url)
return cls.from_llm_and_ai_plugin(
llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs
)
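
# Illustrative usage sketch (not from the repository). The spec URL is a
# placeholder for any reachable OpenAPI/Swagger document; construction builds one
# natural-language tool (backed by an LLM chain) per documented operation, so
# `langchain-openai` and an OpenAI API key are assumed for the model below.
from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
nla_toolkit = NLAToolkit.from_llm_and_url(llm, "https://example.com/openapi.json")
print([tool.name for tool in nla_toolkit.get_tools()])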

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/nla/tool.py
"""Tool for interacting with a single API with natural language definition."""
from __future__ import annotations
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests
class NLATool(Tool): # type: ignore[override]
"""Natural Language API Tool."""
@classmethod
def from_open_api_endpoint_chain(
cls, chain: OpenAPIEndpointChain, api_title: str
) -> "NLATool":
"""Convert an endpoint chain to an API endpoint tool.
Args:
chain: The endpoint chain.
api_title: The title of the API.
Returns:
The API endpoint tool.
"""
expanded_name = (
f'{api_title.replace(" ", "_")}.{chain.api_operation.operation_id}'
)
description = (
f"I'm an AI from {api_title}. Instruct what you want,"
" and I'll assist via an API with description:"
f" {chain.api_operation.description}"
)
return cls(name=expanded_name, func=chain.run, description=description)
@classmethod
def from_llm_and_method(
cls,
llm: BaseLanguageModel,
path: str,
method: str,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
**kwargs: Any,
) -> "NLATool":
"""Instantiate the tool from the specified path and method.
Args:
llm: The language model to use.
path: The path of the API.
method: The method of the API.
spec: The OpenAPI spec.
requests: Optional requests object. Default is None.
verbose: Whether to print verbose output. Default is False.
return_intermediate_steps: Whether to return intermediate steps.
Default is False.
kwargs: Additional arguments.
Returns:
The tool.
"""
api_operation = APIOperation.from_openapi_spec(spec, path, method)
chain = OpenAPIEndpointChain.from_api_operation(
api_operation,
llm,
requests=requests,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return cls.from_open_api_endpoint_chain(chain, spec.info.title)

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/clickup/toolkit.py
from typing import Dict, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.clickup.prompt import (
CLICKUP_FOLDER_CREATE_PROMPT,
CLICKUP_GET_ALL_TEAMS_PROMPT,
CLICKUP_GET_FOLDERS_PROMPT,
CLICKUP_GET_LIST_PROMPT,
CLICKUP_GET_SPACES_PROMPT,
CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
CLICKUP_GET_TASK_PROMPT,
CLICKUP_LIST_CREATE_PROMPT,
CLICKUP_TASK_CREATE_PROMPT,
CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
CLICKUP_UPDATE_TASK_PROMPT,
)
from langchain_community.tools.clickup.tool import ClickupAction
from langchain_community.utilities.clickup import ClickupAPIWrapper
class ClickupToolkit(BaseToolkit):
"""Clickup Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
See https://python.langchain.com/docs/security for more information.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_clickup_api_wrapper(
cls, clickup_api_wrapper: ClickupAPIWrapper
) -> "ClickupToolkit":
"""Create a ClickupToolkit from a ClickupAPIWrapper.
Args:
clickup_api_wrapper: ClickupAPIWrapper. The Clickup API wrapper.
Returns:
ClickupToolkit. The Clickup toolkit.
"""
operations: List[Dict] = [
{
"mode": "get_task",
"name": "Get task",
"description": CLICKUP_GET_TASK_PROMPT,
},
{
"mode": "get_task_attribute",
"name": "Get task attribute",
"description": CLICKUP_GET_TASK_ATTRIBUTE_PROMPT,
},
{
"mode": "get_teams",
"name": "Get Teams",
"description": CLICKUP_GET_ALL_TEAMS_PROMPT,
},
{
"mode": "create_task",
"name": "Create Task",
"description": CLICKUP_TASK_CREATE_PROMPT,
},
{
"mode": "create_list",
"name": "Create List",
"description": CLICKUP_LIST_CREATE_PROMPT,
},
{
"mode": "create_folder",
"name": "Create Folder",
"description": CLICKUP_FOLDER_CREATE_PROMPT,
},
{
"mode": "get_list",
"name": "Get all lists in the space",
"description": CLICKUP_GET_LIST_PROMPT,
},
{
"mode": "get_folders",
"name": "Get all folders in the workspace",
"description": CLICKUP_GET_FOLDERS_PROMPT,
},
{
"mode": "get_spaces",
"name": "Get all spaces in the workspace",
"description": CLICKUP_GET_SPACES_PROMPT,
},
{
"mode": "update_task",
"name": "Update task",
"description": CLICKUP_UPDATE_TASK_PROMPT,
},
{
"mode": "update_task_assignees",
"name": "Update task assignees",
"description": CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT,
},
]
tools = [
ClickupAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=clickup_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
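
# Illustrative usage sketch (not from the repository). Assumes you already hold a
# ClickUp OAuth access token; the token below is a placeholder, and constructing
# the wrapper may call the ClickUp API to discover the workspace.
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper

clickup_wrapper = ClickupAPIWrapper(access_token="<your-clickup-access-token>")
clickup_toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup_wrapper)
print([tool.name for tool in clickup_toolkit.get_tools()])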

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/base.py
"""OpenAPI spec agent."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_community.agent_toolkits.openapi.prompt import (
OPENAPI_PREFIX,
OPENAPI_SUFFIX,
)
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
if TYPE_CHECKING:
from langchain.agents.agent import AgentExecutor
def create_openapi_agent(
llm: BaseLanguageModel,
toolkit: OpenAPIToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = OPENAPI_PREFIX,
suffix: str = OPENAPI_SUFFIX,
format_instructions: Optional[str] = None,
input_variables: Optional[List[str]] = None,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
return_intermediate_steps: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct an OpenAPI agent from an LLM and tools.
*Security Note*: When creating an OpenAPI agent, check the permissions
and capabilities of the underlying toolkit.
For example, the default implementation of OpenAPIToolkit
uses the RequestsToolkit, which contains tools to make arbitrary
network requests against any URL (e.g., GET, POST, PATCH, PUT, DELETE).
Control access to who can use this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
Args:
llm: The language model to use.
toolkit: The OpenAPI toolkit.
callback_manager: Optional. The callback manager. Default is None.
prefix: Optional. The prefix for the prompt. Default is OPENAPI_PREFIX.
suffix: Optional. The suffix for the prompt. Default is OPENAPI_SUFFIX.
format_instructions: Optional. The format instructions for the prompt.
Default is None.
input_variables: Optional. The input variables for the prompt. Default is None.
max_iterations: Optional. The maximum number of iterations. Default is 15.
max_execution_time: Optional. The maximum execution time. Default is None.
early_stopping_method: Optional. The early stopping method. Default is "force".
verbose: Optional. Whether to print verbose output. Default is False.
return_intermediate_steps: Optional. Whether to return intermediate steps.
Default is False.
agent_executor_kwargs: Optional. Additional keyword arguments
for the agent executor.
kwargs: Additional arguments.
Returns:
The agent executor.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = (
{"format_instructions": format_instructions}
if format_instructions is not None
else {}
)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
**prompt_params,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
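
# Illustrative usage sketch (not from the repository). The spec path is a
# placeholder; `pyyaml`, `langchain`, `langchain-openai`, and an OpenAI API key are
# assumed, and dangerous (state-changing) requests must be opted into explicitly.
import yaml
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_openai import ChatOpenAI

with open("openapi.yaml") as f:  # placeholder path to an OpenAPI spec
    raw_spec = yaml.safe_load(f)

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
openapi_toolkit = OpenAPIToolkit.from_llm(
    llm,
    JsonSpec(dict_=raw_spec, max_value_length=4000),
    TextRequestsWrapper(headers={}),
    allow_dangerous_requests=True,
)
agent_executor = create_openapi_agent(llm=llm, toolkit=openapi_toolkit, verbose=True)
agent_executor.invoke({"input": "What endpoints does this API expose?"})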

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/toolkit.py
"""Requests toolkit."""
from __future__ import annotations
from typing import Any, List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool, Tool
from langchain_core.tools.base import BaseToolkit
from langchain_community.agent_toolkits.json.base import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.openapi.prompt import DESCRIPTION
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain_community.utilities.requests import TextRequestsWrapper
class RequestsToolkit(BaseToolkit):
"""Toolkit for making REST requests.
*Security Note*: This toolkit contains tools to make GET, POST, PATCH, PUT,
and DELETE requests to an API.
Exercise care in who is allowed to use this toolkit. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can use this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
Setup:
Install ``langchain-community``.
.. code-block:: bash
pip install -U langchain-community
Key init args:
requests_wrapper: langchain_community.utilities.requests.GenericRequestsWrapper
wrapper for executing requests.
allow_dangerous_requests: bool
Defaults to False. Must "opt-in" to using dangerous requests by setting to True.
Instantiate:
.. code-block:: python
from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
from langchain_community.utilities.requests import TextRequestsWrapper
toolkit = RequestsToolkit(
requests_wrapper=TextRequestsWrapper(headers={}),
allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST,
)
Tools:
.. code-block:: python
tools = toolkit.get_tools()
tools
.. code-block:: none
[RequestsGetTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),
RequestsPostTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),
RequestsPatchTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),
RequestsPutTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),
RequestsDeleteTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True)]
Use within an agent:
.. code-block:: python
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
api_spec = \"\"\"
openapi: 3.0.0
info:
title: JSONPlaceholder API
version: 1.0.0
servers:
- url: https://jsonplaceholder.typicode.com
paths:
/posts:
get:
summary: Get posts
parameters: &id001
- name: _limit
in: query
required: false
schema:
type: integer
example: 2
description: Limit the number of results
\"\"\"
system_message = \"\"\"
You have access to an API to help answer user queries.
Here is documentation on the API:
{api_spec}
\"\"\".format(api_spec=api_spec)
llm = ChatOpenAI(model="gpt-4o-mini")
agent_executor = create_react_agent(llm, tools, state_modifier=system_message)
example_query = "Fetch the top two posts. What are their titles?"
events = agent_executor.stream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
for event in events:
event["messages"][-1].pretty_print()
.. code-block:: none
================================ Human Message =================================
Fetch the top two posts. What are their titles?
================================== Ai Message ==================================
Tool Calls:
requests_get (call_RV2SOyzCnV5h2sm4WPgG8fND)
Call ID: call_RV2SOyzCnV5h2sm4WPgG8fND
Args:
url: https://jsonplaceholder.typicode.com/posts?_limit=2
================================= Tool Message =================================
Name: requests_get
[
{
"userId": 1,
"id": 1,
"title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
"body": "quia et suscipit..."
},
{
"userId": 1,
"id": 2,
"title": "qui est esse",
"body": "est rerum tempore vitae..."
}
]
================================== Ai Message ==================================
The titles of the top two posts are:
1. "sunt aut facere repellat provident occaecati excepturi optio reprehenderit"
2. "qui est esse"
""" # noqa: E501
requests_wrapper: TextRequestsWrapper
"""The requests wrapper."""
allow_dangerous_requests: bool = False
"""Allow dangerous requests. See documentation for details."""
def get_tools(self) -> List[BaseTool]:
"""Return a list of tools."""
return [
RequestsGetTool(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
),
RequestsPostTool(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
),
RequestsPatchTool(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
),
RequestsPutTool(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
),
RequestsDeleteTool(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
),
]
class OpenAPIToolkit(BaseToolkit):
"""Toolkit for interacting with an OpenAPI API.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
For example, this toolkit can be used to delete data exposed via
an OpenAPI compliant API.
"""
json_agent: Any
"""The JSON agent."""
requests_wrapper: TextRequestsWrapper
"""The requests wrapper."""
allow_dangerous_requests: bool = False
"""Allow dangerous requests. See documentation for details."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
json_agent_tool = Tool(
name="json_explorer",
func=self.json_agent.run,
description=DESCRIPTION,
)
request_toolkit = RequestsToolkit(
requests_wrapper=self.requests_wrapper,
allow_dangerous_requests=self.allow_dangerous_requests,
)
return [*request_toolkit.get_tools(), json_agent_tool]
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
json_spec: JsonSpec,
requests_wrapper: TextRequestsWrapper,
allow_dangerous_requests: bool = False,
**kwargs: Any,
) -> OpenAPIToolkit:
"""Create json agent from llm, then initialize."""
json_agent = create_json_agent(llm, JsonToolkit(spec=json_spec), **kwargs)
return cls(
json_agent=json_agent,
requests_wrapper=requests_wrapper,
allow_dangerous_requests=allow_dangerous_requests,
)

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/spec.py
"""Quick and dirty representation for OpenAPI specs."""
from dataclasses import dataclass
from typing import List, Tuple
from langchain_core.utils.json_schema import dereference_refs
@dataclass(frozen=True)
class ReducedOpenAPISpec:
"""A reduced OpenAPI spec.
This is a quick and dirty representation for OpenAPI specs.
Parameters:
servers: The servers in the spec.
description: The description of the spec.
endpoints: The endpoints in the spec.
"""
servers: List[dict]
description: str
endpoints: List[Tuple[str, str, dict]]
def reduce_openapi_spec(spec: dict, dereference: bool = True) -> ReducedOpenAPISpec:
"""Simplify/distill/minify a spec somehow.
I want a smaller target for retrieval and (more importantly)
I want smaller results from retrieval.
I was hoping https://openapi.tools/ would have some useful bits
to this end, but doesn't seem so.
Args:
spec: The OpenAPI spec.
dereference: Whether to dereference the spec. Default is True.
Returns:
ReducedOpenAPISpec: The reduced OpenAPI spec.
"""
# 1. Consider only get, post, patch, put, delete endpoints.
endpoints = [
(f"{operation_name.upper()} {route}", docs.get("description"), docs)
for route, operation in spec["paths"].items()
for operation_name, docs in operation.items()
if operation_name in ["get", "post", "patch", "put", "delete"]
]
# 2. Replace any refs so that complete docs are retrieved.
# Note: probably want to do this post-retrieval, it blows up the size of the spec.
if dereference:
endpoints = [
(name, description, dereference_refs(docs, full_schema=spec))
for name, description, docs in endpoints
]
# 3. Strip docs down to required request args + happy path response.
def reduce_endpoint_docs(docs: dict) -> dict:
out = {}
if docs.get("description"):
out["description"] = docs.get("description")
if docs.get("parameters"):
out["parameters"] = [
parameter
for parameter in docs.get("parameters", [])
if parameter.get("required")
]
if "200" in docs["responses"]:
out["responses"] = docs["responses"]["200"]
if docs.get("requestBody"):
out["requestBody"] = docs.get("requestBody")
return out
endpoints = [
(name, description, reduce_endpoint_docs(docs))
for name, description, docs in endpoints
]
return ReducedOpenAPISpec(
servers=spec["servers"],
description=spec["info"].get("description", ""),
endpoints=endpoints,
)
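
# Illustrative usage sketch (not from the repository). A tiny inline spec shows
# what the reducer keeps: the servers list, the top-level description, and one
# (name, description, docs) tuple per GET/POST/PATCH/PUT/DELETE operation.
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec

raw_spec = {
    "servers": [{"url": "https://api.example.com"}],
    "info": {"title": "Tiny API", "description": "A two-endpoint demo."},
    "paths": {
        "/items": {
            "get": {
                "description": "List items.",
                "responses": {"200": {"description": "OK"}},
            },
            "post": {
                "description": "Create an item.",
                "responses": {"201": {"description": "Created"}},
            },
        }
    },
}

reduced = reduce_openapi_spec(raw_spec)
print(reduced.description)  # "A two-endpoint demo."
print([name for name, _, _ in reduced.endpoints])  # ['GET /items', 'POST /items']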

# File: lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/planner_prompt.py
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.
You should:
1) evaluate whether the user query can be solved by the API documented below. If no, say why.
2) if yes, generate a plan of API calls and say what they are doing step by step.
3) If the plan includes a DELETE call, you should always return an ask from the User for authorization first unless the User has specifically asked to delete something.
You should only use API endpoints documented below ("Endpoints you can use:").
You can only use the DELETE tool if the User has specifically asked to delete something. Otherwise, you should return a request authorization from the User first.
Some user queries can be resolved in a single API call, but some will require several API calls.
The plan will be passed to an API controller that can format it into web requests and return the responses.
----
Here are some examples:
Fake endpoints for examples:
GET /user to get information about the current user
GET /products/search search across products
POST /users/{{id}}/cart to add products to a user's cart
PATCH /users/{{id}}/cart to update a user's cart
PUT /users/{{id}}/coupon to apply idempotent coupon to a user's cart
DELETE /users/{{id}}/cart to delete a user's cart
User query: tell me a joke
Plan: Sorry, this API's domain is shopping, not comedy.
User query: I want to buy a couch
Plan: 1. GET /products with a query param to search for couches
2. GET /user to find the user's id
3. POST /users/{{id}}/cart to add a couch to the user's cart
User query: I want to add a lamp to my cart
Plan: 1. GET /products with a query param to search for lamps
2. GET /user to find the user's id
3. PATCH /users/{{id}}/cart to add a lamp to the user's cart
User query: I want to add a coupon to my cart
Plan: 1. GET /user to find the user's id
2. PUT /users/{{id}}/coupon to apply the coupon
User query: I want to delete my cart
Plan: 1. GET /user to find the user's id
2. DELETE required. Did user specify DELETE or previously authorize? Yes, proceed.
3. DELETE /users/{{id}}/cart to delete the user's cart
User query: I want to start a new cart
Plan: 1. GET /user to find the user's id
2. DELETE required. Did user specify DELETE or previously authorize? No, ask for authorization.
3. Are you sure you want to delete your cart?
----
Here are endpoints you can use. Do not reference any of the endpoints above.
{endpoints}
----
User query: {query}
Plan:"""
API_PLANNER_TOOL_NAME = "api_planner"
API_PLANNER_TOOL_DESCRIPTION = f"Can be used to generate the right API calls to assist with a user query, like {API_PLANNER_TOOL_NAME}(query). Should always be called before trying to call the API controller."
# Execution.
API_CONTROLLER_PROMPT = """You are an agent that gets a sequence of API calls and given their documentation, should execute them and return the final response.
If you cannot complete them and run into issues, you should explain the issue. If you're unable to resolve an API call, you can retry the API call. When interacting with API objects, you should extract ids for inputs to other API calls but ids and names for outputs returned to the User.
Here is documentation on the API:
Base url: {api_url}
Endpoints:
{api_docs}
Here are tools to execute requests against the API: {tool_descriptions}
Starting below, you should follow this format:
Plan: the plan of API calls to execute
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the output of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing the plan (or, I cannot finish executing the plan without knowing some other information.)
Final Answer: the final output from executing the plan or missing information I'd need to re-plan correctly.
Begin!
Plan: {input}
Thought:
{agent_scratchpad}
"""
API_CONTROLLER_TOOL_NAME = "api_controller"
API_CONTROLLER_TOOL_DESCRIPTION = f"Can be used to execute a plan of API calls, like {API_CONTROLLER_TOOL_NAME}(plan)."
# Orchestrate planning + execution.
# The goal is to have an agent at the top-level (e.g. so it can recover from errors and re-plan) while
# keeping planning (and specifically the planning prompt) simple.
API_ORCHESTRATOR_PROMPT = """You are an agent that assists with user queries against API, things like querying information or creating resources.
Some user queries can be resolved in a single API call, particularly if you can find appropriate params from the OpenAPI spec; though some require several API calls.
You should always plan your API calls first, and then execute the plan second.
If the plan includes a DELETE call, be sure to ask the User for authorization first unless the User has specifically asked to delete something.
You should never return information without executing the api_controller tool.
Here are the tools to plan and execute API requests: {tool_descriptions}
Starting below, you should follow this format:
User query: the query a User wants help with related to the API
Thought: you should always think about what to do
Action: the action to take, should be one of the tools [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I am finished executing a plan and have the information the user asked for or the data the user asked to create
Final Answer: the final output from executing the plan
Example:
User query: can you add some trendy stuff to my shopping cart.
Thought: I should plan API calls first.
Action: api_planner
Action Input: I need to find the right API calls to add trendy items to the users shopping cart
Observation: 1) GET /items with params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
Thought: I'm ready to execute the API calls.
Action: api_controller
Action Input: 1) GET /items params 'trending' is 'True' to get trending item ids
2) GET /user to get user
3) POST /cart to post the trending items to the user's cart
...
Begin!
User query: {input}
Thought: I should generate a plan to help with this query and then copy that plan exactly to the controller.
{agent_scratchpad}"""
REQUESTS_GET_TOOL_DESCRIPTION = """Use this to GET content from a website.
Input to the tool should be a json string with 3 keys: "url", "params" and "output_instructions".
The value of "url" should be a string.
The value of "params" should be a dict of the needed and available parameters from the OpenAPI spec related to the endpoint.
If parameters are not needed, or not available, leave it empty.
The value of "output_instructions" should be instructions on what information to extract from the response,
for example the id(s) for a resource(s) that the GET request fetches.
"""
PARSING_GET_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_POST_TOOL_DESCRIPTION = """Use this when you want to POST to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to POST to the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the POST request creates.
Always use double quotes for strings in the json string."""
PARSING_POST_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_PATCH_TOOL_DESCRIPTION = """Use this when you want to PATCH content on a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs of the body params available in the OpenAPI spec you want to PATCH the content with at the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the PATCH request creates.
Always use double quotes for strings in the json string."""
PARSING_PATCH_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_PUT_TOOL_DESCRIPTION = """Use this when you want to PUT to a website.
Input to the tool should be a json string with 3 keys: "url", "data", and "output_instructions".
The value of "url" should be a string.
The value of "data" should be a dictionary of key-value pairs you want to PUT to the url.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the PUT request creates.
Always use double quotes for strings in the json string."""
PARSING_PUT_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
REQUESTS_DELETE_TOOL_DESCRIPTION = """ONLY USE THIS TOOL WHEN THE USER HAS SPECIFICALLY REQUESTED TO DELETE CONTENT FROM A WEBSITE.
Input to the tool should be a json string with 2 keys: "url", and "output_instructions".
The value of "url" should be a string.
The value of "output_instructions" should be instructions on what information to extract from the response, for example the id(s) for a resource(s) that the DELETE request creates.
Always use double quotes for strings in the json string.
ONLY USE THIS TOOL IF THE USER HAS SPECIFICALLY REQUESTED TO DELETE SOMETHING."""
PARSING_DELETE_PROMPT = PromptTemplate(
template="""Here is an API response:\n\n{response}\n\n====
Your task is to extract some information according to these instructions: {instructions}
When working with API objects, you should usually use ids over names. Do not return any ids or names that are not in the response.
If the response indicates an error, you should instead output a summary of the error.
Output:""",
input_variables=["response", "instructions"],
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/planner.py | """Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
import json
import re
from functools import partial
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, cast
import yaml
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.tools import BaseTool, Tool
from pydantic import Field
from langchain_community.agent_toolkits.openapi.planner_prompt import (
API_CONTROLLER_PROMPT,
API_CONTROLLER_TOOL_DESCRIPTION,
API_CONTROLLER_TOOL_NAME,
API_ORCHESTRATOR_PROMPT,
API_PLANNER_PROMPT,
API_PLANNER_TOOL_DESCRIPTION,
API_PLANNER_TOOL_NAME,
PARSING_DELETE_PROMPT,
PARSING_GET_PROMPT,
PARSING_PATCH_PROMPT,
PARSING_POST_PROMPT,
PARSING_PUT_PROMPT,
REQUESTS_DELETE_TOOL_DESCRIPTION,
REQUESTS_GET_TOOL_DESCRIPTION,
REQUESTS_PATCH_TOOL_DESCRIPTION,
REQUESTS_POST_TOOL_DESCRIPTION,
REQUESTS_PUT_TOOL_DESCRIPTION,
)
from langchain_community.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain_community.llms import OpenAI
from langchain_community.tools.requests.tool import BaseRequestsTool
from langchain_community.utilities.requests import RequestsWrapper
#
# Requests tools with LLM-instructed extraction of truncated responses.
#
# Of course, truncating so bluntly may lose a lot of valuable
# information in the response.
# However, the goal for now is to have only a single inference step.
MAX_RESPONSE_LENGTH = 5000
"""Maximum length of the response to be returned."""
Operation = Literal["GET", "POST", "PUT", "DELETE", "PATCH"]
def _get_default_llm_chain(prompt: BasePromptTemplate) -> Any:
from langchain.chains.llm import LLMChain
return LLMChain(
llm=OpenAI(),
prompt=prompt,
)
def _get_default_llm_chain_factory(
prompt: BasePromptTemplate,
) -> Callable[[], Any]:
"""Returns a default LLMChain factory."""
return partial(_get_default_llm_chain, prompt)
class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests GET tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_get"
"""Tool name."""
description: str = REQUESTS_GET_TOOL_DESCRIPTION
"""Tool description."""
response_length: int = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: Any = Field(
default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
data_params = data.get("params")
response: str = cast(
str, self.requests_wrapper.get(data["url"], params=data_params)
)
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
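# A minimal usage sketch (an assumption, not part of this module): the tool is
# normally constructed by the controller agent further below, but it can also be
# exercised directly. The URL is a placeholder, and the default llm_chain expects
# an OpenAI API key in the environment.
#
#   from langchain_community.utilities.requests import RequestsWrapper
#
#   get_tool = RequestsGetToolWithParsing(
#       requests_wrapper=RequestsWrapper(headers={}),
#       allow_dangerous_requests=True,
#   )
#   get_tool.run(
#       '{"url": "https://example.com/items", "params": {}, '
#       '"output_instructions": "Return the item ids"}'
#   )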
class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests POST tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_post"
"""Tool name."""
description: str = REQUESTS_POST_TOOL_DESCRIPTION
"""Tool description."""
response_length: int = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: Any = Field(
default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response: str = cast(str, self.requests_wrapper.post(data["url"], data["data"]))
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests PATCH tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_patch"
"""Tool name."""
description: str = REQUESTS_PATCH_TOOL_DESCRIPTION
"""Tool description."""
response_length: int = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: Any = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response: str = cast(
str, self.requests_wrapper.patch(data["url"], data["data"])
)
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Requests PUT tool with LLM-instructed extraction of truncated responses."""
name: str = "requests_put"
"""Tool name."""
description: str = REQUESTS_PUT_TOOL_DESCRIPTION
"""Tool description."""
response_length: int = MAX_RESPONSE_LENGTH
"""Maximum length of the response to be returned."""
llm_chain: Any = Field(
default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT)
)
"""LLMChain used to extract the response."""
def _run(self, text: str) -> str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response: str = cast(str, self.requests_wrapper.put(data["url"], data["data"]))
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool): # type: ignore[override]
"""Tool that sends a DELETE request and parses the response."""
name: str = "requests_delete"
"""The name of the tool."""
description: str = REQUESTS_DELETE_TOOL_DESCRIPTION
"""The description of the tool."""
response_length: Optional[int] = MAX_RESPONSE_LENGTH
"""The maximum length of the response."""
llm_chain: Any = Field(
default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT)
)
"""The LLM chain used to parse the response."""
def _run(self, text: str) -> str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response: str = cast(str, self.requests_wrapper.delete(data["url"]))
response = response[: self.response_length]
return self.llm_chain.predict(
response=response, instructions=data["output_instructions"]
).strip()
async def _arun(self, text: str) -> str:
raise NotImplementedError()
#
# Orchestrator, planner, controller.
#
def _create_api_planner_tool(
api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
from langchain.chains.llm import LLMChain
endpoint_descriptions = [
f"{name} {description}" for name, description, _ in api_spec.endpoints
]
prompt = PromptTemplate(
template=API_PLANNER_PROMPT,
input_variables=["query"],
partial_variables={"endpoints": "- " + "- ".join(endpoint_descriptions)},
)
chain = LLMChain(llm=llm, prompt=prompt)
tool = Tool(
name=API_PLANNER_TOOL_NAME,
description=API_PLANNER_TOOL_DESCRIPTION,
func=chain.run,
)
return tool
def _create_api_controller_agent(
api_url: str,
api_docs: str,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
allow_dangerous_requests: bool,
allowed_operations: Sequence[Operation],
) -> Any:
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools: List[BaseTool] = []
if "GET" in allowed_operations:
get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
tools.append(
RequestsGetToolWithParsing( # type: ignore[call-arg]
requests_wrapper=requests_wrapper,
llm_chain=get_llm_chain,
allow_dangerous_requests=allow_dangerous_requests,
)
)
if "POST" in allowed_operations:
post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
tools.append(
RequestsPostToolWithParsing( # type: ignore[call-arg]
requests_wrapper=requests_wrapper,
llm_chain=post_llm_chain,
allow_dangerous_requests=allow_dangerous_requests,
)
)
if "PUT" in allowed_operations:
put_llm_chain = LLMChain(llm=llm, prompt=PARSING_PUT_PROMPT)
tools.append(
RequestsPutToolWithParsing( # type: ignore[call-arg]
requests_wrapper=requests_wrapper,
llm_chain=put_llm_chain,
allow_dangerous_requests=allow_dangerous_requests,
)
)
if "DELETE" in allowed_operations:
delete_llm_chain = LLMChain(llm=llm, prompt=PARSING_DELETE_PROMPT)
tools.append(
RequestsDeleteToolWithParsing( # type: ignore[call-arg]
requests_wrapper=requests_wrapper,
llm_chain=delete_llm_chain,
allow_dangerous_requests=allow_dangerous_requests,
)
)
if "PATCH" in allowed_operations:
patch_llm_chain = LLMChain(llm=llm, prompt=PARSING_PATCH_PROMPT)
tools.append(
RequestsPatchToolWithParsing( # type: ignore[call-arg]
requests_wrapper=requests_wrapper,
llm_chain=patch_llm_chain,
allow_dangerous_requests=allow_dangerous_requests,
)
)
if not tools:
raise ValueError("Tools not found")
prompt = PromptTemplate(
template=API_CONTROLLER_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"api_url": api_url,
"api_docs": api_docs,
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools],
)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def _create_api_controller_tool(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
allow_dangerous_requests: bool,
allowed_operations: Sequence[Operation],
) -> Tool:
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]["url"] # TODO: do better.
def _create_and_run_api_controller_agent(plan_str: str) -> str:
pattern = r"\b(GET|POST|PATCH|DELETE|PUT)\s+(/\S+)*"
matches = re.findall(pattern, plan_str)
endpoint_names = [
"{method} {route}".format(method=method, route=route.split("?")[0])
for method, route in matches
]
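        # Illustrative only: a plan such as
        #   "1) GET /items?limit=5 to fetch items\n2) POST /cart to add them"
        # yields endpoint_names == ["GET /items", "POST /cart"]; query strings are
        # dropped by the split on "?".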
docs_str = ""
for endpoint_name in endpoint_names:
found_match = False
for name, _, docs in api_spec.endpoints:
regex_name = re.compile(re.sub("\\{.*?\\}", ".*", name))
if regex_name.match(endpoint_name):
found_match = True
docs_str += f"== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n"
if not found_match:
raise ValueError(f"{endpoint_name} endpoint does not exist.")
agent = _create_api_controller_agent(
base_url,
docs_str,
requests_wrapper,
llm,
allow_dangerous_requests,
allowed_operations,
)
return agent.run(plan_str)
return Tool(
name=API_CONTROLLER_TOOL_NAME,
func=_create_and_run_api_controller_agent,
description=API_CONTROLLER_TOOL_DESCRIPTION,
)
def create_openapi_agent(
api_spec: ReducedOpenAPISpec,
requests_wrapper: RequestsWrapper,
llm: BaseLanguageModel,
shared_memory: Optional[Any] = None,
callback_manager: Optional[BaseCallbackManager] = None,
verbose: bool = True,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
allow_dangerous_requests: bool = False,
allowed_operations: Sequence[Operation] = ("GET", "POST"),
**kwargs: Any,
) -> Any:
"""Construct an OpenAI API planner and controller for a given spec.
Inject credentials via requests_wrapper.
We use a top-level "orchestrator" agent to invoke the planner and controller,
rather than a top-level planner
that invokes a controller with its plan. This is to keep the planner simple.
    You need to set allow_dangerous_requests to True to use an agent with BaseRequestsTool.
Requests can be dangerous and can lead to security vulnerabilities.
For example, users can ask a server to make a request to an internal
server. It's recommended to use requests through a proxy server
and avoid accepting inputs from untrusted sources without proper sandboxing.
Please see: https://python.langchain.com/docs/security
for further security information.
Args:
api_spec: The OpenAPI spec.
requests_wrapper: The requests wrapper.
llm: The language model.
shared_memory: Optional. The shared memory. Default is None.
callback_manager: Optional. The callback manager. Default is None.
verbose: Optional. Whether to print verbose output. Default is True.
agent_executor_kwargs: Optional. Additional keyword arguments
for the agent executor.
allow_dangerous_requests: Optional. Whether to allow dangerous requests.
Default is False.
allowed_operations: Optional. The allowed operations.
Default is ("GET", "POST").
kwargs: Additional arguments.
Returns:
The agent executor.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = [
_create_api_planner_tool(api_spec, llm),
_create_api_controller_tool(
api_spec,
requests_wrapper,
llm,
allow_dangerous_requests,
allowed_operations,
),
]
prompt = PromptTemplate(
template=API_ORCHESTRATOR_PROMPT,
input_variables=["input", "agent_scratchpad"],
partial_variables={
"tool_names": ", ".join([tool.name for tool in tools]),
"tool_descriptions": "\n".join(
[f"{tool.name}: {tool.description}" for tool in tools]
),
},
)
agent = ZeroShotAgent(
llm_chain=LLMChain(llm=llm, prompt=prompt, memory=shared_memory),
allowed_tools=[tool.name for tool in tools],
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
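# A minimal usage sketch (illustrative; the spec file name, headers, and model
# choice are assumptions, not part of this module):
#
#   import yaml
#   from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
#   from langchain_community.utilities.requests import RequestsWrapper
#   from langchain_openai import ChatOpenAI
#
#   with open("openapi.yaml") as f:
#       raw_spec = yaml.safe_load(f)
#   api_spec = reduce_openapi_spec(raw_spec)
#   requests_wrapper = RequestsWrapper(headers={"Authorization": "Bearer <token>"})
#   agent = create_openapi_agent(
#       api_spec,
#       requests_wrapper,
#       llm=ChatOpenAI(model="gpt-4o-mini", temperature=0),
#       allow_dangerous_requests=True,
#   )
#   agent.run("What endpoints are available for managing items?")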
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/__init__.py | """OpenAPI spec agent."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/openapi/prompt.py | # flake8: noqa
OPENAPI_PREFIX = """You are an agent designed to answer questions by making web requests to an API given the openapi spec.
If the question does not seem related to the API, return I don't know. Do not make up an answer.
Only use information provided by the tools to construct your response.
First, find the base URL needed to make the request.
Second, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.
Third, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.
Fourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a fixed set of values, please use the spec to look at which values are allowed.
Use the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.
If you get a not found error, ensure that you are using a path that actually exists in the spec.
"""
OPENAPI_SUFFIX = """Begin!
Question: {input}
Thought: I should explore the spec to find the base server url for the API in the servers node.
{agent_scratchpad}"""
DESCRIPTION = """Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request.
Example inputs to this tool:
'What are the required query parameters for a GET request to the /bar endpoint?'
'What are the required parameters in the request body for a POST request to the /foo endpoint?'
Always give this tool a specific question."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/polygon/toolkit.py | from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.polygon import (
PolygonAggregates,
PolygonFinancials,
PolygonLastQuote,
PolygonTickerNews,
)
from langchain_community.utilities.polygon import PolygonAPIWrapper
class PolygonToolkit(BaseToolkit):
"""Polygon Toolkit.
Parameters:
tools: List[BaseTool]. The tools in the toolkit.
"""
tools: List[BaseTool] = []
@classmethod
def from_polygon_api_wrapper(
cls, polygon_api_wrapper: PolygonAPIWrapper
) -> "PolygonToolkit":
"""Create a Polygon Toolkit from a Polygon API Wrapper.
Args:
polygon_api_wrapper: PolygonAPIWrapper. The Polygon API Wrapper.
Returns:
PolygonToolkit. The Polygon Toolkit.
"""
tools = [
PolygonAggregates(
api_wrapper=polygon_api_wrapper,
),
PolygonLastQuote(
api_wrapper=polygon_api_wrapper,
),
PolygonTickerNews(
api_wrapper=polygon_api_wrapper,
),
PolygonFinancials(
api_wrapper=polygon_api_wrapper,
),
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
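# A minimal usage sketch (assumes the POLYGON_API_KEY environment variable is set):
#
#   from langchain_community.utilities.polygon import PolygonAPIWrapper
#
#   toolkit = PolygonToolkit.from_polygon_api_wrapper(PolygonAPIWrapper())
#   tools = toolkit.get_tools()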
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/polygon/__init__.py | """Polygon Toolkit"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/playwright/toolkit.py | """Playwright web browser toolkit."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Type, cast
from langchain_core.tools import BaseTool, BaseToolkit
from pydantic import ConfigDict, model_validator
from langchain_community.tools.playwright.base import (
BaseBrowserTool,
lazy_import_playwright_browsers,
)
from langchain_community.tools.playwright.click import ClickTool
from langchain_community.tools.playwright.current_page import CurrentWebPageTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksTool,
)
from langchain_community.tools.playwright.extract_text import ExtractTextTool
from langchain_community.tools.playwright.get_elements import GetElementsTool
from langchain_community.tools.playwright.navigate import NavigateTool
from langchain_community.tools.playwright.navigate_back import NavigateBackTool
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
class PlayWrightBrowserToolkit(BaseToolkit):
"""Toolkit for PlayWright browser tools.
**Security Note**: This toolkit provides code to control a web-browser.
Careful if exposing this toolkit to end-users. The tools in the toolkit
are capable of navigating to arbitrary webpages, clicking on arbitrary
elements, and extracting arbitrary text and hyperlinks from webpages.
Specifically, by default this toolkit allows navigating to:
- Any URL (including any internal network URLs)
- And local files
If exposing to end-users, consider limiting network access to the
    server that hosts the agent; in addition, it is advised
    to create a custom NavigationTool with an args_schema that limits the URLs
that can be navigated to (e.g., only allow navigating to URLs that
start with a particular prefix).
Remember to scope permissions to the minimal permissions necessary for
the application. If the default tool selection is not appropriate for
the application, consider creating a custom toolkit with the appropriate
tools.
See https://python.langchain.com/docs/security for more information.
Parameters:
sync_browser: Optional. The sync browser. Default is None.
async_browser: Optional. The async browser. Default is None.
"""
sync_browser: Optional["SyncBrowser"] = None
async_browser: Optional["AsyncBrowser"] = None
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_imports_and_browser_provided(cls, values: dict) -> Any:
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get("async_browser") is None and values.get("sync_browser") is None:
raise ValueError("Either async_browser or sync_browser must be specified.")
return values
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tool_classes: List[Type[BaseBrowserTool]] = [
ClickTool,
NavigateTool,
NavigateBackTool,
ExtractTextTool,
ExtractHyperlinksTool,
GetElementsTool,
CurrentWebPageTool,
]
tools = [
tool_cls.from_browser(
sync_browser=self.sync_browser, async_browser=self.async_browser
)
for tool_cls in tool_classes
]
return cast(List[BaseTool], tools)
@classmethod
def from_browser(
cls,
sync_browser: Optional[SyncBrowser] = None,
async_browser: Optional[AsyncBrowser] = None,
) -> PlayWrightBrowserToolkit:
"""Instantiate the toolkit.
Args:
sync_browser: Optional. The sync browser. Default is None.
async_browser: Optional. The async browser. Default is None.
Returns:
The toolkit.
"""
# This is to raise a better error than the forward ref ones Pydantic would have
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
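# A minimal usage sketch (assumes `pip install playwright` followed by
# `playwright install` to download a browser; the async variant is analogous,
# using create_async_playwright_browser and async_browser=):
#
#   from langchain_community.tools.playwright.utils import (
#       create_sync_playwright_browser,
#   )
#
#   browser = create_sync_playwright_browser()
#   toolkit = PlayWrightBrowserToolkit.from_browser(sync_browser=browser)
#   tools = toolkit.get_tools()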
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/playwright/__init__.py | """Playwright browser toolkit."""
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
__all__ = ["PlayWrightBrowserToolkit"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/csv/__init__.py | from pathlib import Path
from typing import Any
from langchain_core._api.path import as_import_path
def __getattr__(name: str) -> Any:
"""Get attr name."""
if name == "create_csv_agent":
# Get directory of langchain package
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = "langchain." + here + "." + name
new_path = "langchain_experimental." + here + "." + name
raise ImportError(
"This agent has been moved to langchain experiment. "
"This agent relies on python REPL tool under the hood, so to use it "
"safely please sandbox the python REPL. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"and https://github.com/langchain-ai/langchain/discussions/11680"
"To keep using this code as is, install langchain experimental and "
f"update your import statement from:\n `{old_path}` to `{new_path}`."
)
raise AttributeError(f"{name} does not exist")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/json/base.py | """Json agent."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
if TYPE_CHECKING:
from langchain.agents.agent import AgentExecutor
def create_json_agent(
llm: BaseLanguageModel,
toolkit: JsonToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = JSON_PREFIX,
suffix: str = JSON_SUFFIX,
format_instructions: Optional[str] = None,
input_variables: Optional[List[str]] = None,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Construct a json agent from an LLM and tools.
Args:
llm: The language model to use.
toolkit: The toolkit to use.
callback_manager: The callback manager to use. Default is None.
prefix: The prefix to use. Default is JSON_PREFIX.
suffix: The suffix to use. Default is JSON_SUFFIX.
format_instructions: The format instructions to use. Default is None.
input_variables: The input variables to use. Default is None.
verbose: Whether to print verbose output. Default is False.
agent_executor_kwargs: Optional additional arguments for the agent executor.
kwargs: Additional arguments for the agent.
Returns:
The agent executor.
"""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = (
{"format_instructions": format_instructions}
if format_instructions is not None
else {}
)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
**prompt_params,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
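# A minimal usage sketch (the spec file name, size limit, and model are
# assumptions, not part of this module):
#
#   import yaml
#   from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
#   from langchain_community.tools.json.tool import JsonSpec
#   from langchain_openai import ChatOpenAI
#
#   with open("openapi.yaml") as f:
#       data = yaml.safe_load(f)
#   toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
#   agent = create_json_agent(
#       llm=ChatOpenAI(temperature=0), toolkit=toolkit, verbose=True
#   )
#   agent.run("What are the required parameters in the request body for POST /pets?")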
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/json/toolkit.py | from __future__ import annotations
from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.json.tool import (
JsonGetValueTool,
JsonListKeysTool,
JsonSpec,
)
class JsonToolkit(BaseToolkit):
"""Toolkit for interacting with a JSON spec.
Parameters:
spec: The JSON spec.
"""
spec: JsonSpec
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
JsonListKeysTool(spec=self.spec),
JsonGetValueTool(spec=self.spec),
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/json/__init__.py | """Json agent."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits | lc_public_repos/langchain/libs/community/langchain_community/agent_toolkits/json/prompt.py | # flake8: noqa
JSON_PREFIX = """You are an agent designed to interact with JSON.
Your goal is to return a final answer by interacting with the JSON.
You have access to the following tools which help you learn more about the JSON you are interacting with.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
Do not make up any information that is not contained in the JSON.
Your input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python.
You should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`.
If you have not seen a key in one of those responses, you cannot use it.
You should only add one key at a time to the path. You cannot add multiple keys at once.
If you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.
If the question does not seem to be related to the JSON, just return "I don't know" as the answer.
Always begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.
Note that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".
In this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.
Do not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.
"""
JSON_SUFFIX = """Begin!"
Question: {input}
Thought: I should look at the keys that exist in data to see what I have access to
{agent_scratchpad}"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/cross_encoders/base.py | from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEncoder
__all__ = ["BaseCrossEncoder"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/cross_encoders/huggingface.py | from typing import Any, Dict, List, Tuple
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.cross_encoders.base import BaseCrossEncoder
DEFAULT_MODEL_NAME = "BAAI/bge-reranker-base"
class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
"""HuggingFace cross encoder models.
Example:
.. code-block:: python
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
model_name = "BAAI/bge-reranker-base"
model_kwargs = {'device': 'cpu'}
hf = HuggingFaceCrossEncoder(
model_name=model_name,
model_kwargs=model_kwargs
)
"""
client: Any = None #: :meta private:
model_name: str = DEFAULT_MODEL_NAME
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Keyword arguments to pass to the model."""
def __init__(self, **kwargs: Any):
"""Initialize the sentence_transformer."""
super().__init__(**kwargs)
try:
import sentence_transformers
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers python package. "
"Please install it with `pip install sentence-transformers`."
) from exc
self.client = sentence_transformers.CrossEncoder(
self.model_name, **self.model_kwargs
)
model_config = ConfigDict(extra="forbid", protected_namespaces=())
def score(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
"""Compute similarity scores using a HuggingFace transformer model.
Args:
text_pairs: The list of text text_pairs to score the similarity.
Returns:
List of scores, one for each pair.
"""
scores = self.client.predict(text_pairs)
        # Some models, e.g. bert-multilingual-passage-reranking-msmarco,
        # return two scores (not_relevant and relevant) for each pair.
        if len(scores.shape) > 1:  # keep only the "relevant" score
            scores = list(map(lambda x: x[1], scores))
return scores
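# A minimal usage sketch (downloads the model on first use; the query/passage
# pair is illustrative):
#
#   encoder = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
#   encoder.score(
#       [("what is a panda?", "The giant panda is a bear species endemic to China.")]
#   )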
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/cross_encoders/sagemaker_endpoint.py | import json
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, ConfigDict, model_validator
from langchain_community.cross_encoders.base import BaseCrossEncoder
class CrossEncoderContentHandler:
"""Content handler for CrossEncoder class."""
content_type = "application/json"
accepts = "application/json"
def transform_input(self, text_pairs: List[Tuple[str, str]]) -> bytes:
input_str = json.dumps({"text_pairs": text_pairs})
return input_str.encode("utf-8")
def transform_output(self, output: Any) -> List[float]:
response_json = json.loads(output.read().decode("utf-8"))
scores = response_json["scores"]
return scores
class SagemakerEndpointCrossEncoder(BaseModel, BaseCrossEncoder):
"""SageMaker Inference CrossEncoder endpoint.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
            from langchain_community.cross_encoders import SagemakerEndpointCrossEncoder
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpointCrossEncoder(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any = None #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: CrossEncoderContentHandler = CrossEncoderContentHandler()
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
model_config = ConfigDict(
arbitrary_types_allowed=True, extra="forbid", protected_namespaces=()
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values.get("credentials_profile_name"):
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
def score(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
"""Call out to SageMaker Inference CrossEncoder endpoint."""
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(text_pairs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return self.content_handler.transform_output(response["Body"])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/cross_encoders/fake.py | from difflib import SequenceMatcher
from typing import List, Tuple
from pydantic import BaseModel
from langchain_community.cross_encoders.base import BaseCrossEncoder
class FakeCrossEncoder(BaseCrossEncoder, BaseModel):
"""Fake cross encoder model."""
def score(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
scores = list(
map(
lambda pair: SequenceMatcher(None, pair[0], pair[1]).ratio(), text_pairs
)
)
return scores
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/cross_encoders/__init__.py | """**Cross encoders** are wrappers around cross encoder models from different APIs and
services.
**Cross encoder models** can be LLMs or not.
**Class hierarchy:**
.. code-block::
BaseCrossEncoder --> <name>CrossEncoder # Examples: SagemakerEndpointCrossEncoder
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.cross_encoders.base import (
BaseCrossEncoder,
)
from langchain_community.cross_encoders.fake import (
FakeCrossEncoder,
)
from langchain_community.cross_encoders.huggingface import (
HuggingFaceCrossEncoder,
)
from langchain_community.cross_encoders.sagemaker_endpoint import (
SagemakerEndpointCrossEncoder,
)
__all__ = [
"BaseCrossEncoder",
"FakeCrossEncoder",
"HuggingFaceCrossEncoder",
"SagemakerEndpointCrossEncoder",
]
_module_lookup = {
"BaseCrossEncoder": "langchain_community.cross_encoders.base",
"FakeCrossEncoder": "langchain_community.cross_encoders.fake",
"HuggingFaceCrossEncoder": "langchain_community.cross_encoders.huggingface",
"SagemakerEndpointCrossEncoder": "langchain_community.cross_encoders.sagemaker_endpoint", # noqa: E501
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/baichuan.py | from __future__ import annotations
import json
import logging
from typing import Any, Dict, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class BaichuanLLM(LLM):
    # TODO: Add streaming support.
"""Baichuan large language models."""
model: str = "Baichuan2-Turbo-192k"
"""
Other models are available at https://platform.baichuan-ai.com/docs/api.
"""
temperature: float = 0.3
top_p: float = 0.95
timeout: int = 60
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
baichuan_api_host: Optional[str] = None
baichuan_api_key: Optional[SecretStr] = None
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
values["baichuan_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "baichuan_api_key", "BAICHUAN_API_KEY")
)
values["baichuan_api_host"] = get_from_dict_or_env(
values,
"baichuan_api_host",
"BAICHUAN_API_HOST",
default="https://api.baichuan-ai.com/v1/chat/completions",
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
def _post(self, request: Any) -> Any:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.baichuan_api_key.get_secret_value()}", # type: ignore[union-attr]
}
try:
response = requests.post(
self.baichuan_api_host, # type: ignore[arg-type]
headers=headers,
json=request,
timeout=self.timeout,
)
if response.status_code == 200:
parsed_json = json.loads(response.text)
return parsed_json["choices"][0]["message"]["content"]
else:
response.raise_for_status()
except Exception as e:
raise ValueError(f"An error has occurred: {e}")
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._default_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self._post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baichuan-llm"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/yi.py | from __future__ import annotations
import json
import logging
from typing import Any, Dict, List, Literal, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import Field, SecretStr
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class YiLLM(LLM):
"""Yi large language models."""
model: str = "yi-large"
temperature: float = 0.3
top_p: float = 0.95
timeout: int = 60
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
yi_api_key: Optional[SecretStr] = None
region: Literal["auto", "domestic", "international"] = "auto"
yi_api_url_domestic: str = "https://api.lingyiwanwu.com/v1/chat/completions"
yi_api_url_international: str = "https://api.01.ai/v1/chat/completions"
def __init__(self, **kwargs: Any):
kwargs["yi_api_key"] = convert_to_secret_str(
get_from_dict_or_env(kwargs, "yi_api_key", "YI_API_KEY")
)
super().__init__(**kwargs)
@property
def _default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
def _post(self, request: Any) -> Any:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.yi_api_key.get_secret_value()}", # type: ignore
}
urls = []
if self.region == "domestic":
urls = [self.yi_api_url_domestic]
elif self.region == "international":
urls = [self.yi_api_url_international]
else: # auto
urls = [self.yi_api_url_domestic, self.yi_api_url_international]
for url in urls:
try:
response = requests.post(
url,
headers=headers,
json=request,
timeout=self.timeout,
)
if response.status_code == 200:
parsed_json = json.loads(response.text)
return parsed_json["choices"][0]["message"]["content"]
elif (
response.status_code != 403
): # If not a permission error, raise immediately
response.raise_for_status()
except requests.RequestException as e:
if url == urls[-1]: # If this is the last URL to try
raise ValueError(f"An error has occurred: {e}")
else:
logger.warning(f"Failed to connect to {url}, trying next URL")
continue
raise ValueError("Failed to connect to all available URLs")
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._default_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self._post(request)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "yi-llm"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/replicate.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, model_validator
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain_community.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
model_config = ConfigDict(
populate_by_name=True,
extra="forbid",
)
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "replicate"]
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field for field in get_fields(cls).keys()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
yield GenerationChunk(text=output)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
if ":" in self.model:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
else:
model = replicate_python.models.get(self.model)
self.version_obj = model.latest_version
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
# if it's an official model
if ":" not in self.model:
return replicate_python.models.predictions.create(self.model, input=input_)
else:
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
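# A minimal usage sketch (assumes the REPLICATE_API_TOKEN environment variable is
# set; the model id and parameters are illustrative):
#
#   llm = Replicate(
#       model="meta/meta-llama-3-8b-instruct",
#       model_kwargs={"temperature": 0.75, "max_new_tokens": 250},
#   )
#   for chunk in llm.stream("Why is the sky blue?"):
#       print(chunk, end="", flush=True)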
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/vllm.py | from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import pre_init
from pydantic import Field
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
class VLLM(BaseLLM):
"""VLLM language model."""
model: str = ""
"""The name or path of a HuggingFace Transformers model."""
tensor_parallel_size: Optional[int] = 1
"""The number of GPUs to use for distributed execution with tensor parallelism."""
trust_remote_code: Optional[bool] = False
"""Trust remote code (e.g., from HuggingFace) when downloading the model
and tokenizer."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
best_of: Optional[int] = None
"""Number of output sequences that are generated from the prompt."""
presence_penalty: float = 0.0
"""Float that penalizes new tokens based on whether they appear in the
generated text so far"""
frequency_penalty: float = 0.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far"""
temperature: float = 1.0
"""Float that controls the randomness of the sampling."""
top_p: float = 1.0
"""Float that controls the cumulative probability of the top tokens to consider."""
top_k: int = -1
"""Integer that controls the number of top tokens to consider."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
stop: Optional[List[str]] = None
"""List of strings that stop the generation when they are generated."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
max_new_tokens: int = 512
"""Maximum number of tokens to generate per output sequence."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
dtype: str = "auto"
"""The data type for the model weights and activations."""
download_dir: Optional[str] = None
"""Directory to download and load the weights. (Default to the default
cache dir of huggingface)"""
vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""
client: Any = None #: :meta private:
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
values["client"] = VLLModel(
model=values["model"],
tensor_parallel_size=values["tensor_parallel_size"],
trust_remote_code=values["trust_remote_code"],
dtype=values["dtype"],
download_dir=values["download_dir"],
**values["vllm_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"use_beam_search": self.use_beam_search,
"logprobs": self.logprobs,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
from vllm import SamplingParams
lora_request = kwargs.pop("lora_request", None)
# build sampling parameters
params = {**self._default_params, **kwargs, "stop": stop}
# filter params for SamplingParams
known_keys = SamplingParams.__annotations__.keys()
sample_params = SamplingParams(
**{k: v for k, v in params.items() if k in known_keys}
)
# call the model
if lora_request:
outputs = self.client.generate(
prompts, sample_params, lora_request=lora_request
)
else:
outputs = self.client.generate(prompts, sample_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm"
class VLLMOpenAI(BaseOpenAI):
"""vLLM OpenAI-compatible API client"""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
"logit_bias": None,
}
if not is_openai_v1():
params.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
}
)
return params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm-openai"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/gradient_ai.py | import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import Self
from langchain_community.llms.utils import enforce_stop_tokens
class TrainResult(TypedDict):
"""Train result."""
loss: float
class GradientLLM(BaseLLM):
"""Gradient.ai LLM Endpoints.
GradientLLM is a class to interact with LLMs on gradient.ai
To use, set the environment variable ``GRADIENT_ACCESS_TOKEN`` with your
API token and ``GRADIENT_WORKSPACE_ID`` for your gradient workspace,
or alternatively provide them as keywords to the constructor of this class.
Example:
.. code-block:: python
from langchain_community.llms import GradientLLM
GradientLLM(
model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model",
model_kwargs={
"max_generated_token_count": 128,
"temperature": 0.75,
"top_p": 0.95,
"top_k": 20,
"stop": [],
},
gradient_workspace_id="12345614fc0_workspace",
gradient_access_token="gradientai-access_token",
)
"""
model_id: str = Field(alias="model", min_length=2)
"Underlying gradient.ai model id (base or fine-tuned)."
gradient_workspace_id: Optional[str] = None
"Underlying gradient.ai workspace_id."
gradient_access_token: Optional[str] = None
"""gradient.ai API Token, which can be generated by going to
https://auth.gradient.ai/select-workspace
and selecting "Access tokens" under the profile drop-down.
"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
gradient_api_url: str = "https://api.gradient.ai/api"
"""Endpoint URL to use."""
aiosession: Optional[aiohttp.ClientSession] = None #: :meta private:
"""ClientSession, private, subject to change in upcoming releases."""
# LLM call kwargs
model_config = ConfigDict(
populate_by_name=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
values["gradient_access_token"] = get_from_dict_or_env(
values, "gradient_access_token", "GRADIENT_ACCESS_TOKEN"
)
values["gradient_workspace_id"] = get_from_dict_or_env(
values, "gradient_workspace_id", "GRADIENT_WORKSPACE_ID"
)
values["gradient_api_url"] = get_from_dict_or_env(
values, "gradient_api_url", "GRADIENT_API_URL"
)
return values
@model_validator(mode="after")
def post_init(self) -> Self:
"""Post init validation."""
        # Can be moved to post_init_validation
try:
import gradientai # noqa
except ImportError:
logging.warning(
"DeprecationWarning: `GradientLLM` will use "
"`pip install gradientai` in future releases of langchain."
)
except Exception:
pass
        # Can be moved to post_init_validation
        if self.gradient_access_token is None or len(self.gradient_access_token) < 10:
            raise ValueError("env variable `GRADIENT_ACCESS_TOKEN` must be set")
        if self.gradient_workspace_id is None or len(self.gradient_workspace_id) < 3:
            raise ValueError("env variable `GRADIENT_WORKSPACE_ID` must be set")
if self.model_kwargs:
kw = self.model_kwargs
if not 0 <= kw.get("temperature", 0.5) <= 1:
raise ValueError("`temperature` must be in the range [0.0, 1.0]")
if not 0 <= kw.get("top_p", 0.5) <= 1:
raise ValueError("`top_p` must be in the range [0.0, 1.0]")
if 0 >= kw.get("top_k", 0.5):
raise ValueError("`top_k` must be positive")
if 0 >= kw.get("max_generated_token_count", 1):
raise ValueError("`max_generated_token_count` must be positive")
return self
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"gradient_api_url": self.gradient_api_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gradient"
def _kwargs_post_fine_tune_request(
self, inputs: Sequence[str], kwargs: Mapping[str, Any]
) -> Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
multipliers = _params.get("multipliers", None)
return dict(
url=f"{self.gradient_api_url}/models/{self.model_id}/fine-tune",
headers={
"authorization": f"Bearer {self.gradient_access_token}",
"x-gradient-workspace-id": f"{self.gradient_workspace_id}",
"accept": "application/json",
"content-type": "application/json",
},
json=dict(
samples=(
tuple(
{
"inputs": input,
}
for input in inputs
)
if multipliers is None
else tuple(
{
"inputs": input,
"fineTuningParameters": {
"multiplier": multiplier,
},
}
for input, multiplier in zip(inputs, multipliers)
)
),
),
)
def _kwargs_post_request(
self, prompt: str, kwargs: Mapping[str, Any]
) -> Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
return dict(
url=f"{self.gradient_api_url}/models/{self.model_id}/complete",
headers={
"authorization": f"Bearer {self.gradient_access_token}",
"x-gradient-workspace-id": f"{self.gradient_workspace_id}",
"accept": "application/json",
"content-type": "application/json",
},
json=dict(
query=prompt,
maxGeneratedTokenCount=_params.get("max_generated_token_count", None),
temperature=_params.get("temperature", None),
topK=_params.get("top_k", None),
topP=_params.get("top_p", None),
),
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
try:
response = requests.post(**self._kwargs_post_request(prompt, kwargs))
if response.status_code != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
except requests.exceptions.RequestException as e:
raise Exception(f"RequestException while calling Gradient Endpoint: {e}")
text = response.json()["generatedOutput"]
if stop is not None:
# Apply stop tokens when making calls to Gradient
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async Call to Gradients API `model/{id}/complete`.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
**self._kwargs_post_request(prompt=prompt, kwargs=kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
text = (await response.json())["generatedOutput"]
else:
async with self.aiosession.post(
**self._kwargs_post_request(prompt=prompt, kwargs=kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
text = (await response.json())["generatedOutput"]
if stop is not None:
# Apply stop tokens when making calls to Gradient
text = enforce_stop_tokens(text, stop)
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
        # Run prompts concurrently with a thread pool when more than one is given.
def _inner_generate(prompt: str) -> List[Generation]:
return [
Generation(
text=self._call(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
)
]
if len(prompts) <= 1:
generations = list(map(_inner_generate, prompts))
else:
with ThreadPoolExecutor(min(8, len(prompts))) as p:
generations = list(p.map(_inner_generate, prompts))
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
for generation in await asyncio.gather(
*[
self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs)
for prompt in prompts
]
):
generations.append([Generation(text=generation)])
return LLMResult(generations=generations)
def train_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult:
try:
response = requests.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
)
if response.status_code != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
except requests.exceptions.RequestException as e:
raise Exception(f"RequestException while calling Gradient Endpoint: {e}")
response_json = response.json()
loss = response_json["sumLoss"] / response_json["numberOfTrainableTokens"]
return TrainResult(loss=loss)
async def atrain_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult:
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
loss = (
response_json["sumLoss"]
/ response_json["numberOfTrainableTokens"]
)
else:
async with self.aiosession.post(
**self._kwargs_post_fine_tune_request(inputs, kwargs)
) as response:
if response.status != 200:
raise Exception(
f"Gradient returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
loss = (
response_json["sumLoss"] / response_json["numberOfTrainableTokens"]
)
return TrainResult(loss=loss)
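# A minimal usage sketch, guarded so it never runs on import. It assumes valid
# credentials in GRADIENT_ACCESS_TOKEN / GRADIENT_WORKSPACE_ID and a base model
# id copied from the Gradient dashboard; the model id below is a placeholder.
if __name__ == "__main__":
    llm = GradientLLM(
        model="99148c6d-c2a0-4fbe-a4a7-e7c05bdb8a09_base_ml_model",  # placeholder
        model_kwargs={"max_generated_token_count": 64, "temperature": 0.5},
    )
    print(llm.invoke("Explain unsupervised fine-tuning in one sentence."))
    # Unsupervised fine-tuning over raw text samples; the returned TrainResult
    # carries the average loss per trainable token.
    result = llm.train_unsupervised(
        inputs=["Gradient hosts and fine-tunes large language models."]
    )
    print(result["loss"])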
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/self_hosted.py | import importlib.util
import logging
import pickle
from typing import Any, Callable, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a pipeline callable (or, more likely,
a key pointing to the model on the cluster's object store)
and returns text predictions for each document
in the batch.
"""
text = pipeline(prompt, *args, **kwargs)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _send_pipeline_to_device(pipeline: Any, device: int) -> Any:
"""Send a pipeline to a device on the cluster."""
if isinstance(pipeline, str):
with open(pipeline, "rb") as f:
# This code path can only be triggered if the user
# passed allow_dangerous_deserialization=True
pipeline = pickle.load(f) # ignore[pickle]: explicit-opt-in
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline.device = torch.device(device)
pipeline.model = pipeline.model.to(pipeline.device)
return pipeline
class SelfHostedPipeline(LLM):
"""Model inference on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Example for custom pipeline and inference functions:
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def load_pipeline():
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
return pipeline(
"text-generation", model=model, tokenizer=tokenizer,
max_new_tokens=10
)
def inference_fn(pipeline, prompt, stop = None):
return pipeline(prompt)[0]["generated_text"]
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn
)
Example for <2GB model (can be serialized and sent directly to the server):
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
my_model = ...
llm = SelfHostedPipeline.from_pipeline(
pipeline=my_model,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing model path for larger models:
.. code-block:: python
from langchain_community.llms import SelfHostedPipeline
import runhouse as rh
import pickle
from transformers import pipeline
generator = pipeline(model="gpt2")
rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
).save().to(gpu, path="models")
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
"""
pipeline_ref: Any = None #: :meta private:
client: Any = None #: :meta private:
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
hardware: Any = None
"""Remote hardware to send the inference function to."""
model_load_fn: Callable
"""Function to load the model remotely on the server."""
load_fn_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model load function."""
model_reqs: List[str] = ["./", "torch"]
"""Requirements to install on hardware to inference the model."""
allow_dangerous_deserialization: bool = False
"""Allow deserialization using pickle which can be dangerous if
loading compromised data.
"""
model_config = ConfigDict(
extra="forbid",
)
def __init__(self, **kwargs: Any):
"""Init the pipeline with an auxiliary function.
The load function must be in global scope to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
if not kwargs.get("allow_dangerous_deserialization"):
raise ValueError(
"SelfHostedPipeline relies on the pickle module. "
"You will need to set allow_dangerous_deserialization=True "
"if you want to opt-in to allow deserialization of data using pickle."
"Data can be compromised by a malicious actor if "
"not handled properly to include "
"a malicious payload that when deserialized with "
"pickle can execute arbitrary code. "
)
super().__init__(**kwargs)
try:
import runhouse as rh
except ImportError:
raise ImportError(
"Could not import runhouse python package. "
"Please install it with `pip install runhouse`."
)
remote_load_fn = rh.function(fn=self.model_load_fn).to(
self.hardware, reqs=self.model_reqs
)
_load_fn_kwargs = self.load_fn_kwargs or {}
self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs)
self.client = rh.function(fn=self.inference_fn).to(
self.hardware, reqs=self.model_reqs
)
@classmethod
def from_pipeline(
cls,
pipeline: Any,
hardware: Any,
model_reqs: Optional[List[str]] = None,
device: int = 0,
**kwargs: Any,
) -> LLM:
"""Init the SelfHostedPipeline from a pipeline object or string."""
if not isinstance(pipeline, str):
logger.warning(
"Serializing pipeline to send to remote hardware. "
"Note, it can be quite slow"
"to serialize and send large models with each execution. "
"Consider sending the pipeline"
"to the cluster and passing the path to the pipeline instead."
)
load_fn_kwargs = {"pipeline": pipeline, "device": device}
return cls(
load_fn_kwargs=load_fn_kwargs,
model_load_fn=_send_pipeline_to_device,
hardware=hardware,
model_reqs=["transformers", "torch"] + (model_reqs or []),
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"hardware": self.hardware},
}
@property
def _llm_type(self) -> str:
return "self_hosted_llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
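# A minimal usage sketch, guarded so it never runs on import. It assumes the
# `runhouse` and `transformers` packages are installed and that the referenced
# cluster can actually be provisioned; the cluster name and instance type are
# illustrative placeholders.
if __name__ == "__main__":
    import runhouse as rh
    from transformers import pipeline
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
    generator = pipeline(model="gpt2", task="text-generation")
    llm = SelfHostedPipeline.from_pipeline(
        pipeline=generator,
        hardware=gpu,
        model_reqs=["./", "torch", "transformers"],
        allow_dangerous_deserialization=True,  # explicit opt-in required by __init__
    )
    print(llm.invoke("Tell me a joke."))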
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/outlines.py | from __future__ import annotations
import importlib.util
import logging
import platform
from typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Tuple, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class Outlines(LLM):
"""LLM wrapper for the Outlines library."""
client: Any = None # :meta private:
model: str
"""Identifier for the model to use with Outlines.
The model identifier should be a string specifying:
- A Hugging Face model name (e.g., "meta-llama/Llama-2-7b-chat-hf")
- A local path to a model
- For GGUF models, the format is "repo_id/file_name"
(e.g., "TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf")
Examples:
- "TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf"
- "meta-llama/Llama-2-7b-chat-hf"
"""
backend: Literal[
"llamacpp", "transformers", "transformers_vision", "vllm", "mlxlm"
] = "transformers"
"""Specifies the backend to use for the model.
Supported backends are:
- "llamacpp": For GGUF models using llama.cpp
- "transformers": For Hugging Face Transformers models (default)
- "transformers_vision": For vision-language models (e.g., LLaVA)
- "vllm": For models using the vLLM library
- "mlxlm": For models using the MLX framework
Note: Ensure you have the necessary dependencies installed for the chosen backend.
The system will attempt to import required packages and may raise an ImportError
if they are not available.
"""
max_tokens: int = 256
"""The maximum number of tokens to generate."""
stop: Optional[List[str]] = None
"""A list of strings to stop generation when encountered."""
streaming: bool = True
"""Whether to stream the results, token by token."""
regex: Optional[str] = None
"""Regular expression for structured generation.
If provided, Outlines will guarantee that the generated text matches this regex.
This can be useful for generating structured outputs like IP addresses, dates, etc.
Example: (valid IP address)
regex = r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)"
Note: Computing the regex index can take some time, so it's recommended to reuse
the same regex for multiple generations if possible.
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/regex/
"""
type_constraints: Optional[Union[type, str]] = None
"""Type constraints for structured generation.
Restricts the output to valid Python types. Supported types include:
int, float, bool, datetime.date, datetime.time, datetime.datetime.
Example:
type_constraints = int
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/format/
"""
json_schema: Optional[Union[BaseModel, Dict, Callable]] = None
"""Pydantic model, JSON Schema, or callable (function signature)
for structured JSON generation.
Outlines can generate JSON output that follows a specified structure,
which is useful for:
1. Parsing the answer (e.g., with Pydantic), storing it, or returning it to a user.
2. Calling a function with the result.
You can provide:
- A Pydantic model
- A JSON Schema (as a Dict)
- A callable (function signature)
The generated JSON will adhere to the specified structure.
For more details, see: https://dottxt-ai.github.io/outlines/reference/generation/json/
"""
grammar: Optional[str] = None
"""Context-free grammar for structured generation.
If provided, Outlines will generate text that adheres to the specified grammar.
The grammar should be defined in EBNF format.
This can be useful for generating structured outputs like mathematical expressions,
programming languages, or custom domain-specific languages.
Example:
grammar = '''
?start: expression
?expression: term (("+" | "-") term)*
?term: factor (("*" | "/") factor)*
?factor: NUMBER | "-" factor | "(" expression ")"
%import common.NUMBER
'''
Note: Grammar-based generation is currently experimental and may have performance
limitations. It uses greedy generation to mitigate these issues.
For more details and examples, see:
https://dottxt-ai.github.io/outlines/reference/generation/cfg/
"""
custom_generator: Optional[Any] = None
"""Set your own outlines generator object to override the default behavior."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Additional parameters to pass to the underlying model.
Example:
model_kwargs = {"temperature": 0.8, "seed": 42}
"""
@model_validator(mode="after")
def validate_environment(self) -> "Outlines":
"""Validate that outlines is installed and create a model instance."""
num_constraints = sum(
[
bool(self.regex),
bool(self.type_constraints),
bool(self.json_schema),
bool(self.grammar),
]
)
if num_constraints > 1:
raise ValueError(
"Either none or exactly one of regex, type_constraints, "
"json_schema, or grammar can be provided."
)
return self.build_client()
def build_client(self) -> "Outlines":
try:
import outlines.models as models
except ImportError:
raise ImportError(
"Could not import the Outlines library. "
"Please install it with `pip install outlines`."
)
def check_packages_installed(
packages: List[Union[str, Tuple[str, str]]],
) -> None:
missing_packages = [
pkg if isinstance(pkg, str) else pkg[0]
for pkg in packages
if importlib.util.find_spec(pkg[1] if isinstance(pkg, tuple) else pkg)
is None
]
if missing_packages:
raise ImportError( # todo this is displaying wrong
f"Missing packages: {', '.join(missing_packages)}. "
"You can install them with:\n\n"
f" pip install {' '.join(missing_packages)}"
)
if self.backend == "llamacpp":
if ".gguf" in self.model:
creator, repo_name, file_name = self.model.split("/", 2)
repo_id = f"{creator}/{repo_name}"
else: # todo add auto-file-selection if no file is given
raise ValueError("GGUF file_name must be provided for llama.cpp.")
check_packages_installed([("llama-cpp-python", "llama_cpp")])
self.client = models.llamacpp(repo_id, file_name, **self.model_kwargs)
elif self.backend == "transformers":
check_packages_installed(["transformers", "torch", "datasets"])
self.client = models.transformers(self.model, **self.model_kwargs)
elif self.backend == "transformers_vision":
check_packages_installed(
["transformers", "datasets", "torchvision", "PIL", "flash_attn"]
)
from transformers import LlavaNextForConditionalGeneration
if not hasattr(models, "transformers_vision"):
raise ValueError(
"transformers_vision backend is not supported, "
"please install the correct outlines version."
)
self.client = models.transformers_vision(
self.model,
model_class=LlavaNextForConditionalGeneration,
**self.model_kwargs,
)
elif self.backend == "vllm":
if platform.system() == "Darwin":
raise ValueError("vLLM backend is not supported on macOS.")
check_packages_installed(["vllm"])
self.client = models.vllm(self.model, **self.model_kwargs)
elif self.backend == "mlxlm":
check_packages_installed(["mlx"])
self.client = models.mlxlm(self.model, **self.model_kwargs)
else:
raise ValueError(f"Unsupported backend: {self.backend}")
return self
@property
def _llm_type(self) -> str:
return "outlines"
@property
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"stop_at": self.stop,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"backend": self.backend,
"regex": self.regex,
"type_constraints": self.type_constraints,
"json_schema": self.json_schema,
"grammar": self.grammar,
**self._default_params,
}
@property
def _generator(self) -> Any:
from outlines import generate
if self.custom_generator:
return self.custom_generator
if self.regex:
return generate.regex(self.client, regex_str=self.regex)
if self.type_constraints:
return generate.format(self.client, python_type=self.type_constraints)
if self.json_schema:
return generate.json(self.client, schema_object=self.json_schema)
if self.grammar:
return generate.cfg(self.client, cfg_str=self.grammar)
return generate.text(self.client)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
response = ""
if self.streaming:
for chunk in self._stream(
prompt=prompt,
stop=params["stop_at"],
run_manager=run_manager,
**params,
):
response += chunk.text
else:
response = self._generator(prompt, **params)
return response
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._default_params, **kwargs}
if stop:
params["stop_at"] = stop
for token in self._generator.stream(prompt, **params):
if run_manager:
run_manager.on_llm_new_token(token)
yield GenerationChunk(text=token)
@property
def tokenizer(self) -> Any:
"""Access the tokenizer for the underlying model.
.encode() to tokenize text.
.decode() to convert tokens back to text.
"""
if hasattr(self.client, "tokenizer"):
return self.client.tokenizer
raise ValueError("Tokenizer not found")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/databricks.py | import os
import re
import warnings
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Mapping, Optional
import requests
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from pydantic import (
BaseModel,
ConfigDict,
Field,
PrivateAttr,
model_validator,
)
__all__ = ["Databricks"]
class _DatabricksClientBase(BaseModel, ABC):
"""A base JSON API client that talks to Databricks."""
api_url: str
api_token: str
def request(self, method: str, url: str, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_token}"}
response = requests.request(
method=method, url=url, headers=headers, json=request
)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()
def _get(self, url: str) -> Any:
return self.request("GET", url, None)
def _post(self, url: str, request: Any) -> Any:
return self.request("POST", url, request)
@abstractmethod
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any: ...
@property
def llm(self) -> bool:
return False
def _transform_completions(response: Dict[str, Any]) -> str:
return response["choices"][0]["text"]
def _transform_llama2_chat(response: Dict[str, Any]) -> str:
return response["candidates"][0]["text"]
def _transform_chat(response: Dict[str, Any]) -> str:
return response["choices"][0]["message"]["content"]
class _DatabricksServingEndpointClient(_DatabricksClientBase):
"""An API client that talks to a Databricks serving endpoint."""
host: str
endpoint_name: str
databricks_uri: str
client: Any = None
external_or_foundation: bool = False
task: Optional[str] = None
def __init__(self, **data: Any):
super().__init__(**data)
try:
from mlflow.deployments import get_deploy_client
self.client = get_deploy_client(self.databricks_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
"Please install mlflow with `pip install mlflow`."
) from e
endpoint = self.client.get_endpoint(self.endpoint_name)
self.external_or_foundation = endpoint.get("endpoint_type", "").lower() in (
"external_model",
"foundation_model_api",
)
if self.task is None:
self.task = endpoint.get("task")
@property
def llm(self) -> bool:
return self.task in ("llm/v1/chat", "llm/v1/completions", "llama2/chat")
@model_validator(mode="before")
@classmethod
def set_api_url(cls, values: Dict[str, Any]) -> Any:
if "api_url" not in values:
host = values["host"]
endpoint_name = values["endpoint_name"]
api_url = f"https://{host}/serving-endpoints/{endpoint_name}/invocations"
values["api_url"] = api_url
return values
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any:
if self.external_or_foundation:
resp = self.client.predict(endpoint=self.endpoint_name, inputs=request)
if transform_output_fn:
return transform_output_fn(resp)
if self.task == "llm/v1/chat":
return _transform_chat(resp)
elif self.task == "llm/v1/completions":
return _transform_completions(resp)
return resp
else:
# See https://docs.databricks.com/machine-learning/model-serving/score-model-serving-endpoints.html
wrapped_request = {"dataframe_records": [request]}
response = self.client.predict(
endpoint=self.endpoint_name, inputs=wrapped_request
)
preds = response["predictions"]
# For a single-record query, the result is not a list.
pred = preds[0] if isinstance(preds, list) else preds
if self.task == "llama2/chat":
return _transform_llama2_chat(pred)
return transform_output_fn(pred) if transform_output_fn else pred
class _DatabricksClusterDriverProxyClient(_DatabricksClientBase):
"""An API client that talks to a Databricks cluster driver proxy app."""
host: str
cluster_id: str
cluster_driver_port: str
@model_validator(mode="before")
@classmethod
def set_api_url(cls, values: Dict[str, Any]) -> Any:
if "api_url" not in values:
host = values["host"]
cluster_id = values["cluster_id"]
port = values["cluster_driver_port"]
api_url = f"https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}"
values["api_url"] = api_url
return values
def post(
self, request: Any, transform_output_fn: Optional[Callable[..., str]] = None
) -> Any:
resp = self._post(self.api_url, request)
return transform_output_fn(resp) if transform_output_fn else resp
def get_repl_context() -> Any:
"""Get the notebook REPL context if running inside a Databricks notebook.
Returns None otherwise.
"""
try:
from dbruntime.databricks_repl_context import get_context
return get_context()
except ImportError:
raise ImportError(
"Cannot access dbruntime, not running inside a Databricks notebook."
)
def get_default_host() -> str:
"""Get the default Databricks workspace hostname.
Raises an error if the hostname cannot be automatically determined.
"""
host = os.getenv("DATABRICKS_HOST")
if not host:
try:
host = get_repl_context().browserHostName
if not host:
raise ValueError("context doesn't contain browserHostName.")
except Exception as e:
raise ValueError(
"host was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_HOST'. Received error: {e}"
)
# TODO: support Databricks CLI profile
    # Strip the URL scheme (if any) and a trailing slash from the hostname.
    host = re.sub(r"^https?://", "", host).rstrip("/")
return host
def get_default_api_token() -> str:
"""Get the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if api_token := os.getenv("DATABRICKS_TOKEN"):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
"api_token was not set and cannot be automatically inferred. Set "
f"environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
# TODO: support Databricks CLI profile
return api_token
def _is_hex_string(data: str) -> bool:
"""Checks if a data is a valid hexadecimal string using a regular expression."""
if not isinstance(data, str):
return False
pattern = r"^[0-9a-fA-F]+$"
return bool(re.match(pattern, data))
def _load_pickled_fn_from_hex_string(
data: str, allow_dangerous_deserialization: Optional[bool]
) -> Callable:
"""Loads a pickled function from a hexadecimal string."""
if not allow_dangerous_deserialization:
raise ValueError(
"This code relies on the pickle module. "
"You will need to set allow_dangerous_deserialization=True "
"if you want to opt-in to allow deserialization of data using pickle."
"Data can be compromised by a malicious actor if "
"not handled properly to include "
"a malicious payload that when deserialized with "
"pickle can execute arbitrary code on your machine."
)
try:
import cloudpickle
except Exception as e:
raise ValueError(f"Please install cloudpickle>=2.0.0. Error: {e}")
try:
return cloudpickle.loads(bytes.fromhex(data)) # ignore[pickle]: explicit-opt-in
except Exception as e:
raise ValueError(
f"Failed to load the pickled function from a hexadecimal string. Error: {e}"
)
def _pickle_fn_to_hex_string(fn: Callable) -> str:
"""Pickles a function and returns the hexadecimal string."""
try:
import cloudpickle
except Exception as e:
raise ValueError(f"Please install cloudpickle>=2.0.0. Error: {e}")
try:
return cloudpickle.dumps(fn).hex()
except Exception as e:
raise ValueError(f"Failed to pickle the function: {e}")
@deprecated(
since="0.3.3",
removal="1.0",
alternative_import="langchain_databricks.ChatDatabricks",
)
class Databricks(LLM):
"""Databricks serving endpoint or a cluster driver proxy app for LLM.
It supports two endpoint types:
* **Serving endpoint** (recommended for both production and development).
We assume that an LLM was deployed to a serving endpoint.
To wrap it as an LLM you must have "Can Query" permission to the endpoint.
Set ``endpoint_name`` accordingly and do not set ``cluster_id`` and
``cluster_driver_port``.
If the underlying model is a model registered by MLflow, the expected model
signature is:
* inputs::
[{"name": "prompt", "type": "string"},
{"name": "stop", "type": "list[string]"}]
* outputs: ``[{"type": "string"}]``
If the underlying model is an external or foundation model, the response from the
endpoint is automatically transformed to the expected format unless
``transform_output_fn`` is provided.
* **Cluster driver proxy app** (recommended for interactive development).
One can load an LLM on a Databricks interactive cluster and start a local HTTP
server on the driver node to serve the model at ``/`` using HTTP POST method
with JSON input/output.
Please use a port number between ``[3000, 8000]`` and let the server listen to
the driver IP address or simply ``0.0.0.0`` instead of localhost only.
To wrap it as an LLM you must have "Can Attach To" permission to the cluster.
Set ``cluster_id`` and ``cluster_driver_port`` and do not set ``endpoint_name``.
The expected server schema (using JSON schema) is:
* inputs::
{"type": "object",
"properties": {
"prompt": {"type": "string"},
"stop": {"type": "array", "items": {"type": "string"}}},
"required": ["prompt"]}`
* outputs: ``{"type": "string"}``
If the endpoint model signature is different or you want to set extra params,
you can use `transform_input_fn` and `transform_output_fn` to apply necessary
transformations before and after the query.
"""
host: str = Field(default_factory=get_default_host)
"""Databricks workspace hostname.
If not provided, the default value is determined by
* the ``DATABRICKS_HOST`` environment variable if present, or
* the hostname of the current Databricks workspace if running inside
a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode.
"""
api_token: str = Field(default_factory=get_default_api_token)
"""Databricks personal access token.
If not provided, the default value is determined by
* the ``DATABRICKS_TOKEN`` environment variable if present, or
* an automatically generated temporary token if running inside a Databricks
notebook attached to an interactive cluster in "single user" or
"no isolation shared" mode.
"""
endpoint_name: Optional[str] = None
"""Name of the model serving endpoint.
You must specify the endpoint name to connect to a model serving endpoint.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_id: Optional[str] = None
"""ID of the cluster if connecting to a cluster driver proxy app.
    If neither ``endpoint_name`` nor ``cluster_id`` is provided and the code runs
inside a Databricks notebook attached to an interactive cluster in "single user"
or "no isolation shared" mode, the current cluster ID is used as default.
You must not set both ``endpoint_name`` and ``cluster_id``.
"""
cluster_driver_port: Optional[str] = None
"""The port number used by the HTTP server running on the cluster driver node.
The server should listen on the driver IP address or simply ``0.0.0.0`` to connect.
We recommend the server using a port number between ``[3000, 8000]``.
"""
model_kwargs: Optional[Dict[str, Any]] = None
"""
Deprecated. Please use ``extra_params`` instead. Extra parameters to pass to
the endpoint.
"""
transform_input_fn: Optional[Callable] = None
"""A function that transforms ``{prompt, stop, **kwargs}`` into a JSON-compatible
request object that the endpoint accepts.
For example, you can apply a prompt template to the input prompt.
"""
transform_output_fn: Optional[Callable[..., str]] = None
"""A function that transforms the output from the endpoint to the generated text.
"""
databricks_uri: str = "databricks"
"""The databricks URI. Only used when using a serving endpoint."""
temperature: float = 0.0
"""The sampling temperature."""
n: int = 1
"""The number of completion choices to generate."""
stop: Optional[List[str]] = None
"""The stop sequence."""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
extra_params: Dict[str, Any] = Field(default_factory=dict)
"""Any extra parameters to pass to the endpoint."""
task: Optional[str] = None
"""The task of the endpoint. Only used when using a serving endpoint.
If not provided, the task is automatically inferred from the endpoint.
"""
allow_dangerous_deserialization: bool = False
"""Whether to allow dangerous deserialization of the data which
involves loading data using pickle.
If the data has been modified by a malicious actor, it can deliver a
malicious payload that results in execution of arbitrary code on the target
machine.
"""
_client: _DatabricksClientBase = PrivateAttr()
model_config = ConfigDict(
extra="forbid",
)
@property
def _llm_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"temperature": self.temperature,
"n": self.n,
}
if self.stop:
params["stop"] = self.stop
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
@model_validator(mode="before")
@classmethod
def set_cluster_id(cls, values: Dict[str, Any]) -> dict:
cluster_id = values.get("cluster_id")
endpoint_name = values.get("endpoint_name")
if cluster_id and endpoint_name:
raise ValueError("Cannot set both endpoint_name and cluster_id.")
elif endpoint_name:
values["cluster_id"] = None
elif cluster_id:
pass
else:
try:
                if context_cluster_id := get_repl_context().clusterId:
                    values["cluster_id"] = context_cluster_id
                else:
                    raise ValueError("Context doesn't contain clusterId.")
except Exception as e:
raise ValueError(
"Neither endpoint_name nor cluster_id was set. "
"And the cluster_id cannot be automatically determined. Received"
f" error: {e}"
)
cluster_driver_port = values.get("cluster_driver_port")
if cluster_driver_port and endpoint_name:
raise ValueError("Cannot set both endpoint_name and cluster_driver_port.")
elif endpoint_name:
values["cluster_driver_port"] = None
elif cluster_driver_port is None:
raise ValueError(
"Must set cluster_driver_port to connect to a cluster driver."
)
elif int(cluster_driver_port) <= 0:
raise ValueError(f"Invalid cluster_driver_port: {cluster_driver_port}")
else:
pass
if model_kwargs := values.get("model_kwargs"):
assert (
"prompt" not in model_kwargs
), "model_kwargs must not contain key 'prompt'"
assert (
"stop" not in model_kwargs
), "model_kwargs must not contain key 'stop'"
return values
def __init__(self, **data: Any):
if "transform_input_fn" in data and _is_hex_string(data["transform_input_fn"]):
data["transform_input_fn"] = _load_pickled_fn_from_hex_string(
data=data["transform_input_fn"],
allow_dangerous_deserialization=data.get(
"allow_dangerous_deserialization"
),
)
if "transform_output_fn" in data and _is_hex_string(
data["transform_output_fn"]
):
data["transform_output_fn"] = _load_pickled_fn_from_hex_string(
data=data["transform_output_fn"],
allow_dangerous_deserialization=data.get(
"allow_dangerous_deserialization"
),
)
super().__init__(**data)
if self.model_kwargs is not None and self.extra_params is not None:
raise ValueError("Cannot set both extra_params and extra_params.")
elif self.model_kwargs is not None:
warnings.warn(
"model_kwargs is deprecated. Please use extra_params instead.",
DeprecationWarning,
)
if self.endpoint_name:
self._client = _DatabricksServingEndpointClient(
host=self.host,
api_token=self.api_token,
endpoint_name=self.endpoint_name,
databricks_uri=self.databricks_uri,
task=self.task,
)
elif self.cluster_id and self.cluster_driver_port:
self._client = _DatabricksClusterDriverProxyClient( # type: ignore[call-arg]
host=self.host,
api_token=self.api_token,
cluster_id=self.cluster_id,
cluster_driver_port=self.cluster_driver_port,
)
else:
raise ValueError(
"Must specify either endpoint_name or cluster_id/cluster_driver_port."
)
@property
def _default_params(self) -> Dict[str, Any]:
"""Return default params."""
return {
"host": self.host,
# "api_token": self.api_token, # Never save the token
"endpoint_name": self.endpoint_name,
"cluster_id": self.cluster_id,
"cluster_driver_port": self.cluster_driver_port,
"databricks_uri": self.databricks_uri,
"model_kwargs": self.model_kwargs,
"temperature": self.temperature,
"n": self.n,
"stop": self.stop,
"max_tokens": self.max_tokens,
"extra_params": self.extra_params,
"task": self.task,
"transform_input_fn": None
if self.transform_input_fn is None
else _pickle_fn_to_hex_string(self.transform_input_fn),
"transform_output_fn": None
if self.transform_output_fn is None
else _pickle_fn_to_hex_string(self.transform_output_fn),
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "databricks"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Queries the LLM endpoint with the given prompt and stop sequence."""
# TODO: support callbacks
request: Dict[str, Any] = {"prompt": prompt}
if self._client.llm:
request.update(self._llm_params)
request.update(self.model_kwargs or self.extra_params)
request.update(kwargs)
if stop:
request["stop"] = stop
if self.transform_input_fn:
request = self.transform_input_fn(**request)
return self._client.post(request, transform_output_fn=self.transform_output_fn)
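# A minimal usage sketch, guarded so it never runs on import. It assumes
# DATABRICKS_HOST / DATABRICKS_TOKEN are set (or that this runs inside a
# Databricks notebook) and that the serving endpoint named below exists; the
# endpoint name is a placeholder for illustration.
if __name__ == "__main__":
    llm = Databricks(
        endpoint_name="databricks-dolly-completions",  # placeholder endpoint
        temperature=0.1,
        max_tokens=128,
    )
    print(llm.invoke("How are MLflow and Databricks related?"))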
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/modal.py | import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Modal(LLM):
"""Modal large language models.
To use, you should have the ``modal-client`` python package installed.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import Modal
modal = Modal(endpoint_url="")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in get_fields(cls).values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "modal"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Modal endpoint."""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response = requests.post(
url=self.endpoint_url,
headers={
"Content-Type": "application/json",
},
json={"prompt": prompt, **params},
)
        try:
            response_json = response.json()
            if "prompt" not in response_json:
                raise KeyError("prompt")
        except KeyError:
            raise KeyError("LangChain requires 'prompt' key in response.")
        text = response_json["prompt"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
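# A minimal usage sketch, guarded so it never runs on import. The endpoint URL
# is a placeholder for a Modal web endpoint that accepts {"prompt": ...} JSON
# and returns the completion under a "prompt" key, as `_call` expects.
if __name__ == "__main__":
    llm = Modal(
        endpoint_url="https://example--text-gen.modal.run",  # placeholder URL
        max_length=128,  # moved into model_kwargs by the build_extra validator
    )
    print(llm.invoke("Tell me a joke."))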
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/mlflow_ai_gateway.py | from __future__ import annotations
import warnings
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra="allow"): # type: ignore[call-arg]
"""Parameters for the MLflow AI Gateway LLM."""
temperature: float = 0.0
candidate_count: int = 1
"""The number of candidates to return."""
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class MlflowAIGateway(LLM):
"""MLflow AI Gateway LLMs.
To use, you should have the ``mlflow[gateway]`` python package installed.
For more information, see https://mlflow.org/docs/latest/gateway/index.html.
Example:
.. code-block:: python
from langchain_community.llms import MlflowAIGateway
completions = MlflowAIGateway(
gateway_uri="<your-mlflow-ai-gateway-uri>",
route="<your-mlflow-ai-gateway-completions-route>",
params={
"temperature": 0.1
}
)
"""
route: str
gateway_uri: Optional[str] = None
params: Optional[Params] = None
def __init__(self, **kwargs: Any):
warnings.warn(
"`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.",
DeprecationWarning,
)
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
"Could not import `mlflow.gateway` module. "
"Please install it with `pip install mlflow[gateway]`."
) from e
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
resp = mlflow.gateway.query(self.route, data=data)
return resp["candidates"][0]["text"]
@property
def _llm_type(self) -> str:
return "mlflow-ai-gateway"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/chatglm.py | import logging
from typing import Any, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class ChatGLM(LLM):
"""ChatGLM LLM service.
Example:
.. code-block:: python
from langchain_community.llms import ChatGLM
endpoint_url = (
"http://127.0.0.1:8000"
)
ChatGLM_llm = ChatGLM(
endpoint_url=endpoint_url
)
"""
endpoint_url: str = "http://127.0.0.1:8000/"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
max_token: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
history: List[List] = []
"""History of the conversation"""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
with_history: bool = False
"""Whether to use history or not"""
@property
def _llm_type(self) -> str:
return "chat_glm"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm.invoke("Who are you?")
"""
_model_kwargs = self.model_kwargs or {}
        # HTTP headers for the JSON request (no authorization header is sent)
headers = {"Content-Type": "application/json"}
payload = {
"prompt": prompt,
"temperature": self.temperature,
"history": self.history,
"max_length": self.max_token,
"top_p": self.top_p,
}
payload.update(_model_kwargs)
payload.update(kwargs)
logger.debug(f"ChatGLM payload: {payload}")
# call api
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
logger.debug(f"ChatGLM response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
            # Check if response content exists
if isinstance(parsed_response, dict):
content_keys = "response"
if content_keys in parsed_response:
text = parsed_response[content_keys]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
if self.with_history:
self.history = parsed_response["history"]
return text
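# A minimal usage sketch, guarded so it never runs on import. It assumes a
# ChatGLM HTTP server (for example the api.py server shipped with ChatGLM-6B)
# is listening on the address below; the endpoint URL is a placeholder.
if __name__ == "__main__":
    llm = ChatGLM(
        endpoint_url="http://127.0.0.1:8000",
        max_token=2048,
        temperature=0.1,
        with_history=False,
    )
    print(llm.invoke("Who are you?"))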
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/deepsparse.py | # flake8: noqa
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import pre_init
from langchain_community.llms.utils import enforce_stop_tokens
class DeepSparse(LLM):
"""Neural Magic DeepSparse LLM interface.
To use, you should have the ``deepsparse`` or ``deepsparse-nightly``
python package installed. See https://github.com/neuralmagic/deepsparse
    This interface lets you deploy optimized LLMs straight from the
[SparseZoo](https://sparsezoo.neuralmagic.com/?useCase=text_generation)
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
""" # noqa: E501
pipeline: Any #: :meta private:
model: str
"""The path to a model file or directory or the name of a SparseZoo model stub."""
model_configuration: Optional[Dict[str, Any]] = None
"""Keyword arguments passed to the pipeline construction.
Common parameters are sequence_length, prompt_sequence_length"""
generation_config: Union[None, str, Dict] = None
"""GenerationConfig dictionary consisting of parameters used to control
sequences generated for each prompt. Common parameters are:
max_length, max_new_tokens, num_return_sequences, output_scores,
top_p, top_k, repetition_penalty."""
streaming: bool = False
"""Whether to stream the results, token by token."""
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_config": self.model_configuration,
"generation_config": self.generation_config,
"streaming": self.streaming,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepsparse"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that ``deepsparse`` package is installed."""
try:
from deepsparse import Pipeline
except ImportError:
raise ImportError(
"Could not import `deepsparse` package. "
"Please install it with `pip install deepsparse[llm]`"
)
model_config = values["model_configuration"] or {}
values["pipeline"] = Pipeline.create(
task="text_generation",
model_path=values["model"],
**model_config,
)
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm.invoke("Tell me a joke.")
"""
if self.streaming:
combined_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_output += chunk.text
text = combined_output
else:
text = (
self.pipeline(sequences=prompt, **self.generation_config)
.generations[0]
.text
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none")
llm.invoke("Tell me a joke.")
"""
if self.streaming:
combined_output = ""
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
combined_output += chunk.text
text = combined_output
else:
text = (
self.pipeline(sequences=prompt, **self.generation_config)
.generations[0]
.text
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(
model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
streaming=True
)
for chunk in llm.stream("Tell me a joke",
stop=["'","\n"]):
print(chunk, end='', flush=True) # noqa: T201
"""
inference = self.pipeline(
sequences=prompt, streaming=True, **self.generation_config
)
for token in inference:
chunk = GenerationChunk(text=token.generations[0].text)
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like object containing a string token.
Example:
.. code-block:: python
from langchain_community.llms import DeepSparse
llm = DeepSparse(
model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none",
streaming=True
)
for chunk in llm.stream("Tell me a joke",
stop=["'","\n"]):
print(chunk, end='', flush=True) # noqa: T201
"""
inference = self.pipeline(
sequences=prompt, streaming=True, **self.generation_config
)
for token in inference:
chunk = GenerationChunk(text=token.generations[0].text)
if run_manager:
await run_manager.on_llm_new_token(token=chunk.text)
yield chunk
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/llamafile.py | from __future__ import annotations
import json
from io import StringIO
from typing import Any, Dict, Iterator, List, Optional
import requests
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names
from pydantic import ConfigDict
class Llamafile(LLM):
"""Llamafile lets you distribute and run large language models with a
single file.
To get started, see: https://github.com/Mozilla-Ocho/llamafile
To use this class, you will need to first:
1. Download a llamafile.
2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
3. Start the llamafile in server mode:
`./path/to/model.llamafile --server --nobrowser`
Example:
.. code-block:: python
from langchain_community.llms import Llamafile
llm = Llamafile()
llm.invoke("Tell me a joke.")
"""
base_url: str = "http://localhost:8080"
"""Base url where the llamafile server is listening."""
request_timeout: Optional[int] = None
"""Timeout for server requests"""
streaming: bool = False
"""Allows receiving each predicted token in real-time instead of
waiting for the completion to finish. To enable this, set to true."""
# Generation options
seed: int = -1
"""Random Number Generator (RNG) seed. A random seed is used if this is
less than zero. Default: -1"""
temperature: float = 0.8
"""Temperature. Default: 0.8"""
top_k: int = 40
"""Limit the next token selection to the K most probable tokens.
Default: 40."""
top_p: float = 0.95
"""Limit the next token selection to a subset of tokens with a cumulative
probability above a threshold P. Default: 0.95."""
min_p: float = 0.05
"""The minimum probability for a token to be considered, relative to
the probability of the most likely token. Default: 0.05."""
n_predict: int = -1
"""Set the maximum number of tokens to predict when generating text.
Note: May exceed the set limit slightly if the last token is a partial
multibyte character. When 0, no tokens will be generated but the prompt
is evaluated into the cache. Default: -1 = infinity."""
n_keep: int = 0
"""Specify the number of tokens from the prompt to retain when the
context size is exceeded and tokens need to be discarded. By default,
this value is set to 0 (meaning no tokens are kept). Use -1 to retain all
tokens from the prompt."""
tfs_z: float = 1.0
"""Enable tail free sampling with parameter z. Default: 1.0 = disabled."""
typical_p: float = 1.0
"""Enable locally typical sampling with parameter p.
Default: 1.0 = disabled."""
repeat_penalty: float = 1.1
"""Control the repetition of token sequences in the generated text.
Default: 1.1"""
repeat_last_n: int = 64
"""Last n tokens to consider for penalizing repetition. Default: 64,
0 = disabled, -1 = ctx-size."""
penalize_nl: bool = True
"""Penalize newline tokens when applying the repeat penalty.
Default: true."""
presence_penalty: float = 0.0
"""Repeat alpha presence penalty. Default: 0.0 = disabled."""
frequency_penalty: float = 0.0
"""Repeat alpha frequency penalty. Default: 0.0 = disabled"""
mirostat: int = 0
"""Enable Mirostat sampling, controlling perplexity during text
generation. 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0.
Default: disabled."""
mirostat_tau: float = 5.0
"""Set the Mirostat target entropy, parameter tau. Default: 5.0."""
mirostat_eta: float = 0.1
"""Set the Mirostat learning rate, parameter eta. Default: 0.1."""
model_config = ConfigDict(
extra="forbid",
)
@property
def _llm_type(self) -> str:
return "llamafile"
@property
def _param_fieldnames(self) -> List[str]:
# Return the list of fieldnames that will be passed as configurable
# generation options to the llamafile server. Exclude 'builtin' fields
# from the BaseLLM class like 'metadata' as well as fields that should
# not be passed in requests (base_url, request_timeout).
ignore_keys = [
"base_url",
"cache",
"callback_manager",
"callbacks",
"metadata",
"name",
"request_timeout",
"streaming",
"tags",
"verbose",
"custom_get_token_ids",
]
attrs = [
k for k in get_pydantic_field_names(self.__class__) if k not in ignore_keys
]
return attrs
@property
def _default_params(self) -> Dict[str, Any]:
params = {}
for fieldname in self._param_fieldnames:
params[fieldname] = getattr(self, fieldname)
return params
def _get_parameters(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
params = self._default_params
# Only update keys that are already present in the default config.
# This way, we don't accidentally post unknown/unhandled key/values
# in the request to the llamafile server
for k, v in kwargs.items():
if k in params:
params[k] = v
if stop is not None and len(stop) > 0:
params["stop"] = stop
if self.streaming:
params["stream"] = True
return params
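# Hedged sketch of what _get_parameters returns (values are assumptions chosen
# for illustration; only keys already present in the defaults survive):
#
#     llm = Llamafile(temperature=0.0, streaming=True)
#     llm._get_parameters(stop=["</s>"], seed=7, unknown_key=1)
#     # -> {..., "temperature": 0.0, "seed": 7, "stop": ["</s>"], "stream": True}
#     # "unknown_key" is dropped because it is not a known generation field.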
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Request prompt completion from the llamafile server and return the
output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
run_manager:
**kwargs: Any additional options to pass as part of the
generation request.
Returns:
The string generated by the model.
"""
if self.streaming:
with StringIO() as buff:
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
buff.write(chunk.text)
text = buff.getvalue()
return text
else:
params = self._get_parameters(stop=stop, **kwargs)
payload = {"prompt": prompt, **params}
try:
response = requests.post(
url=f"{self.base_url}/completion",
headers={
"Content-Type": "application/json",
},
json=payload,
stream=False,
timeout=self.request_timeout,
)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError(
f"Could not connect to Llamafile server. Please make sure "
f"that a server is running at {self.base_url}."
)
response.raise_for_status()
response.encoding = "utf-8"
text = response.json()["content"]
return text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager:
**kwargs: Any additional options to pass as part of the
generation request.
Returns:
A generator representing the stream of tokens being generated.
Yields:
Dictionary-like objects each containing a token
Example:
.. code-block:: python
from langchain_community.llms import Llamafile
llm = Llamafile(
temperature = 0.0
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True)
"""
params = self._get_parameters(stop=stop, **kwargs)
if "stream" not in params:
params["stream"] = True
payload = {"prompt": prompt, **params}
try:
response = requests.post(
url=f"{self.base_url}/completion",
headers={
"Content-Type": "application/json",
},
json=payload,
stream=True,
timeout=self.request_timeout,
)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError(
f"Could not connect to Llamafile server. Please make sure "
f"that a server is running at {self.base_url}."
)
response.encoding = "utf8"
for raw_chunk in response.iter_lines(decode_unicode=True):
content = self._get_chunk_content(raw_chunk)
chunk = GenerationChunk(text=content)
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
yield chunk
def _get_chunk_content(self, chunk: str) -> str:
"""When streaming is turned on, llamafile server returns lines like:
'data: {"content":" They","multimodal":true,"slot_id":0,"stop":false}'
Here, we convert this to a dict and return the value of the 'content'
field
"""
if chunk.startswith("data:"):
cleaned = chunk[len("data:") :].strip()
data = json.loads(cleaned)
return data["content"]
else:
return chunk
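# Hedged parsing example (the streamed line below is a representative sample,
# not output captured from a real llamafile server):
#
#     raw = 'data: {"content": " They", "multimodal": true, "slot_id": 0, "stop": false}'
#     Llamafile()._get_chunk_content(raw)  # -> " They"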
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import pre_init
from pydantic import ConfigDict, Field
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
EXAMPLE_URL = "https://clarifai.com/openai/chat-completion/models/GPT-4"
class Clarifai(LLM):
"""Clarifai large language models.
To use, you should have an account on the Clarifai platform,
the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your PAT key,
or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Clarifai
clarifai_llm = Clarifai(user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
(or)
clarifai_llm = Clarifai(model_url=EXAMPLE_URL)
"""
model_url: Optional[str] = None
"""Model url to use."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = Field(default=None, exclude=True) #: :meta private:
"""Clarifai personal access token to use."""
token: Optional[str] = Field(default=None, exclude=True) #: :meta private:
"""Clarifai session token to use."""
model: Any = Field(default=None, exclude=True) #: :meta private:
api_base: str = "https://api.clarifai.com"
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
try:
from clarifai.client.model import Model
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
model_version_id = values.get("model_version_id")
model_url = values.get("model_url")
api_base = values.get("api_base")
pat = values.get("pat")
token = values.get("token")
values["model"] = Model(
url=model_url,
app_id=app_id,
user_id=user_id,
model_version=dict(id=model_version_id),
pat=pat,
token=token,
model_id=model_id,
base_url=api_base,
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Clarifai API."""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{
"model_url": self.model_url,
"user_id": self.user_id,
"app_id": self.app_id,
"model_id": self.model_id,
}
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "clarifai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
inference_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> str:
"""Call out to Clarfai's PostModelOutputs endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = clarifai_llm.invoke("Tell me a joke.")
"""
text = ""  # fallback so a failed prediction does not leave `text` unbound
try:
inference_params = {} if inference_params is None else inference_params
predict_response = self.model.predict_by_bytes(
bytes(prompt, "utf-8"),
input_type="text",
inference_params=inference_params,
)
text = predict_response.outputs[0].data.text.raw
if stop is not None:
text = enforce_stop_tokens(text, stop)
except Exception as e:
logger.error(f"Predict failed, exception: {e}")
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
inference_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
try:
from clarifai.client.input import Inputs
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
generations = []
batch_size = 32
input_obj = Inputs.from_auth_helper(self.model.auth_helper)
try:
for i in range(0, len(prompts), batch_size):
batch = prompts[i : i + batch_size]
input_batch = [
input_obj.get_text_input(input_id=str(id), raw_text=inp)
for id, inp in enumerate(batch)
]
inference_params = {} if inference_params is None else inference_params
predict_response = self.model.predict(
inputs=input_batch, inference_params=inference_params
)
for output in predict_response.outputs:
if stop is not None:
text = enforce_stop_tokens(output.data.text.raw, stop)
else:
text = output.data.text.raw
generations.append([Generation(text=text)])
except Exception as e:
logger.error(f"Predict failed, exception: {e}")
return LLMResult(generations=generations)
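# Hedged batch-generation sketch (USER_ID, APP_ID and MODEL_ID are placeholders
# you must supply; the prompts are illustrative):
#
#     clarifai_llm = Clarifai(user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
#     result = clarifai_llm.generate(["Tell me a joke.", "Name a color."])
#     # Prompts are sent to the model in batches of 32 inputs per predict call.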
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/moonshot.py | from typing import Any, Dict, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from langchain_community.llms.utils import enforce_stop_tokens
MOONSHOT_SERVICE_URL_BASE = "https://api.moonshot.cn/v1"
class _MoonshotClient(BaseModel):
"""An API client that talks to the Moonshot server."""
api_key: SecretStr
"""The API key to use for authentication."""
base_url: str = MOONSHOT_SERVICE_URL_BASE
def completion(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"}
response = requests.post(
f"{self.base_url}/chat/completions",
headers=headers,
json=request,
)
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
return response.json()["choices"][0]["message"]["content"]
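# Hedged request sketch: Moonshot._call (defined below) builds a chat-completions
# payload of roughly this shape before handing it to _MoonshotClient.completion
# (the numeric values are the class defaults; the prompt string is an assumption):
#
#     {
#         "model": "moonshot-v1-8k",
#         "max_tokens": 1024,
#         "temperature": 0.3,
#         "messages": [{"role": "user", "content": "Tell me a joke."}],
#     }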
class MoonshotCommon(BaseModel):
"""Common parameters for Moonshot LLMs."""
client: Any
base_url: str = MOONSHOT_SERVICE_URL_BASE
moonshot_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Moonshot API key. Get it here: https://platform.moonshot.cn/console/api-keys"""
model_name: str = Field(default="moonshot-v1-8k", alias="model")
"""Model name. Available models listed here: https://platform.moonshot.cn/pricing"""
max_tokens: int = 1024
"""Maximum number of tokens to generate."""
temperature: float = 0.3
"""Temperature parameter (higher values make the model more creative)."""
model_config = ConfigDict(populate_by_name=True, protected_namespaces=())
@property
def lc_secrets(self) -> dict:
"""A map of constructor argument names to secret ids.
For example,
{"moonshot_api_key": "MOONSHOT_API_KEY"}
"""
return {"moonshot_api_key": "MOONSHOT_API_KEY"}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **self._default_params}
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra parameters.
Override the superclass method to prevent the model parameter from being
overridden.
"""
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["moonshot_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "moonshot_api_key", "MOONSHOT_API_KEY")
)
values["client"] = _MoonshotClient(
api_key=values["moonshot_api_key"],
base_url=values["base_url"]
if "base_url" in values
else MOONSHOT_SERVICE_URL_BASE,
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "moonshot"
class Moonshot(MoonshotCommon, LLM):
"""Moonshot large language models.
To use, you should have the environment variable ``MOONSHOT_API_KEY`` set with your
API key. Referenced from https://platform.moonshot.cn/docs
Example:
.. code-block:: python
from langchain_community.llms.moonshot import Moonshot
moonshot = Moonshot(model="moonshot-v1-8k")
"""
model_config = ConfigDict(
populate_by_name=True,
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = self._invocation_params
request["messages"] = [{"role": "user", "content": prompt}]
request.update(kwargs)
text = self.client.completion(request)
if stop is not None:
# This is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/rwkv.py | """RWKV models.
Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
class RWKV(LLM, BaseModel):
"""RWKV language models.
To use, you should have the ``rwkv`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain_community.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")
# Simplest invocation
response = model.invoke("Once upon a time, ")
"""
model: str
"""Path to the pre-trained RWKV model file."""
tokens_path: str
"""Path to the RWKV tokens file."""
strategy: str = "cpu fp32"
"""Token context window."""
rwkv_verbose: bool = True
"""Print debug information."""
temperature: float = 1.0
"""The temperature to use for sampling."""
top_p: float = 0.5
"""The top-p value to use for sampling."""
penalty_alpha_frequency: float = 0.4
"""Positive values penalize new tokens based on their existing frequency
in the text so far, decreasing the model's likelihood to repeat the same
line verbatim."""
penalty_alpha_presence: float = 0.4
"""Positive values penalize new tokens based on whether they appear
in the text so far, increasing the model's likelihood to talk about
new topics."""
CHUNK_LEN: int = 256
"""Batch size for prompt processing."""
max_tokens_per_generation: int = 256
"""Maximum number of tokens to generate."""
client: Any = None #: :meta private:
tokenizer: Any = None #: :meta private:
pipeline: Any = None #: :meta private:
model_tokens: Any = None #: :meta private:
model_state: Any = None #: :meta private:
model_config = ConfigDict(
extra="forbid",
)
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"verbose": self.verbose,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_alpha_frequency": self.penalty_alpha_frequency,
"penalty_alpha_presence": self.penalty_alpha_presence,
"CHUNK_LEN": self.CHUNK_LEN,
"max_tokens_per_generation": self.max_tokens_per_generation,
}
@staticmethod
def _rwkv_param_names() -> Set[str]:
"""Get the identifying parameters."""
return {
"verbose",
}
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
"Could not import tokenizers python package. "
"Please install it with `pip install tokenizers`."
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs["verbose"] = values["rwkv_verbose"]
values["client"] = RWKVMODEL(
values["model"], strategy=values["strategy"], **model_kwargs
)
values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])
except ImportError:
raise ImportError(
"Could not import rwkv python package. "
"Please install it with `pip install rwkv`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params,
**{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "rwkv"
def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ",:?!"
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
self.model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, self.model_state = self.client.forward(
tokens[: self.CHUNK_LEN], self.model_state
)
tokens = tokens[self.CHUNK_LEN :]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj # adjust \n probability
if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
def rwkv_generate(self, prompt: str) -> str:
self.model_state = None
self.model_tokens = []
logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
begin = len(self.model_tokens)
out_last = begin
occurrence: Dict = {}
decoded = ""
for i in range(self.max_tokens_per_generation):
for n in occurrence:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
token = self.pipeline.sample_logits(
logits, temperature=self.temperature, top_p=self.top_p
)
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = self.run_rnn([token])
xxx = self.tokenizer.decode(self.model_tokens[out_last:])
if "\ufffd" not in xxx: # avoid utf-8 display issues
decoded += xxx
out_last = begin + i + 1
if i >= self.max_tokens_per_generation - 100:
break
return decoded
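# Hedged penalty illustration (the numbers are made up for clarity): with the
# default penalty_alpha_presence=0.4 and penalty_alpha_frequency=0.4, a token
# already sampled 3 times has its logit reduced by 0.4 + 3 * 0.4 = 1.6 on the
# next step, discouraging verbatim repetition.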
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""RWKV generation
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model.invoke(prompt, n_predict=55)
"""
text = self.rwkv_generate(prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/pipelineai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
model_validator,
)
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class PipelineAI(LLM, BaseModel):
"""PipelineAI large language models.
To use, you should have the ``pipeline-ai`` python package installed,
and the environment variable ``PIPELINE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import PipelineAI
pipeline = PipelineAI(pipeline_key="")
"""
pipeline_key: str = ""
"""The id or tag of the target pipeline"""
pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any pipeline parameters valid for `create` call not
explicitly specified."""
pipeline_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = set(cls.model_fields.keys())
extra = values.get("pipeline_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to pipeline_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["pipeline_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "pipeline_api_key", "PIPELINE_API_KEY")
)
values["pipeline_api_key"] = pipeline_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"pipeline_key": self.pipeline_key},
**{"pipeline_kwargs": self.pipeline_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pipeline_ai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ImportError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
client = PipelineCloud(token=self.pipeline_api_key.get_secret_value()) # type: ignore[union-attr]
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f"A pipeline run should have a `result_preview` attribute."
f"Run was: {run}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the pipeline parameters
text = enforce_stop_tokens(text, stop)
return text
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/aleph_alpha.py | from typing import Any, Dict, List, Optional, Sequence
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
from langchain_community.llms.utils import enforce_stop_tokens
class AlephAlpha(LLM):
"""Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained in more depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain_community.llms import AlephAlpha
aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any = None #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[SecretStr] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
The default one is "https://api.aleph-alpha.com"."""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
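# Worked example of the schedule described above (assuming the stated doubling
# backoff): waits of 0.5, 1, 2, 4, 8, 16 and 32 s between successive retries
# sum to 63.5 s.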
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["aleph_alpha_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY")
)
try:
from aleph_alpha_client import Client
values["client"] = Client(
token=values["aleph_alpha_api_key"].get_secret_value(),
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
except ImportError:
raise ImportError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha.invoke("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
if __name__ == "__main__":
aa = AlephAlpha() # type: ignore[call-arg]
print(aa.invoke("How are you?")) # noqa: T201
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/manifest.py | from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import pre_init
from pydantic import ConfigDict
class ManifestWrapper(LLM):
"""HazyResearch's Manifest library."""
client: Any = None #: :meta private:
llm_kwargs: Optional[Dict] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from manifest import Manifest
if not isinstance(values["client"], Manifest):
raise ValueError(
    "ManifestWrapper expects `client` to be a manifest.Manifest instance."
)
except ImportError:
raise ImportError(
"Could not import manifest python package. "
"Please install it with `pip install manifest-ml`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
kwargs = self.llm_kwargs or {}
return {
**self.client.client_pool.get_current_client().get_model_params(),
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "manifest"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to LLM through Manifest."""
if stop is not None and len(stop) != 1:
raise NotImplementedError(
f"Manifest currently only supports a single stop token, got {stop}"
)
params = self.llm_kwargs or {}
params = {**params, **kwargs}
if stop is not None:
params["stop_token"] = stop
return self.client.run(prompt, **params)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/bedrock.py | import asyncio
import json
import warnings
from abc import ABC
from typing import (
Any,
AsyncGenerator,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_community.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
AMAZON_BEDROCK_TRACE_KEY = "amazon-bedrock-trace"
GUARDRAILS_BODY_KEY = "amazon-bedrock-guardrailAssessment"
HUMAN_PROMPT = "\n\nHuman:"
ASSISTANT_PROMPT = "\n\nAssistant:"
ALTERNATION_ERROR = (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
)
def _add_newlines_before_ha(input_text: str) -> str:
new_text = input_text
for word in ["Human:", "Assistant:"]:
new_text = new_text.replace(word, "\n\n" + word)
for i in range(2):
new_text = new_text.replace("\n\n\n" + word, "\n\n" + word)
return new_text
def _human_assistant_format(input_text: str) -> str:
if input_text.count("Human:") == 0 or (
input_text.find("Human:") > input_text.find("Assistant:")
and "Assistant:" in input_text
):
input_text = HUMAN_PROMPT + " " + input_text # SILENT CORRECTION
if input_text.count("Assistant:") == 0:
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
if input_text[: len("Human:")] == "Human:":
input_text = "\n\n" + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
# track alternation
for i in range(len(input_text)):
if input_text[i : i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f" Received {input_text}")
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION
return input_text
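# Hedged before/after example (the input string is an assumption):
#
#     _human_assistant_format("What is the capital of France?")
#     # -> "\n\nHuman: What is the capital of France?\n\nAssistant:"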
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["delta"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["delta"]["text"],
generation_info=dict(
finish_reason=stream_response.get("stop_reason", None),
),
)
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
provider_to_output_key_map = {
"anthropic": "completion",
"amazon": "outputText",
"cohere": "text",
"meta": "generation",
"mistral": "outputs",
}
@classmethod
def prepare_input(
cls,
provider: str,
model_kwargs: Dict[str, Any],
prompt: Optional[str] = None,
system: Optional[str] = None,
messages: Optional[List[Dict]] = None,
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic":
if messages:
input_body["anthropic_version"] = "bedrock-2023-05-31"
input_body["messages"] = messages
if system:
input_body["system"] = system
if "max_tokens" not in input_body:
input_body["max_tokens"] = 1024
if prompt:
input_body["prompt"] = _human_assistant_format(prompt)
if "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 1024
elif provider in ("ai21", "cohere", "meta", "mistral"):
input_body["prompt"] = prompt
elif provider == "amazon":
input_body = dict()
input_body["inputText"] = prompt
input_body["textGenerationConfig"] = {**model_kwargs}
else:
input_body["inputText"] = prompt
return input_body
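# Hedged payload sketch for the Anthropic messages-API path (the prompt and
# temperature values are assumptions; max_tokens falls back to 1024 when not
# supplied):
#
#     LLMInputOutputAdapter.prepare_input(
#         provider="anthropic",
#         model_kwargs={"temperature": 0.5},
#         messages=[{"role": "user", "content": "Hello"}],
#     )
#     # -> {"temperature": 0.5, "anthropic_version": "bedrock-2023-05-31",
#     #     "messages": [{"role": "user", "content": "Hello"}], "max_tokens": 1024}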
@classmethod
def prepare_output(cls, provider: str, response: Any) -> dict:
text = ""
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
if "completion" in response_body:
text = response_body.get("completion")
elif "content" in response_body:
content = response_body.get("content")
text = content[0].get("text")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
text = response_body.get("completions")[0].get("data").get("text")
elif provider == "cohere":
text = response_body.get("generations")[0].get("text")
elif provider == "meta":
text = response_body.get("generation")
elif provider == "mistral":
text = response_body.get("outputs")[0].get("text")
else:
text = response_body.get("results")[0].get("outputText")
headers = response.get("ResponseMetadata", {}).get("HTTPHeaders", {})
prompt_tokens = int(headers.get("x-amzn-bedrock-input-token-count", 0))
completion_tokens = int(headers.get("x-amzn-bedrock-output-token-count", 0))
return {
"text": text,
"body": response_body,
"usage": {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": prompt_tokens + completion_tokens,
},
}
@classmethod
def prepare_output_stream(
cls,
provider: str,
response: Any,
stop: Optional[List[str]] = None,
messages_api: bool = False,
) -> Iterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
if messages_api:
output_key = "message"
else:
output_key = cls.provider_to_output_key_map.get(provider, "")
if not output_key:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if not chunk:
continue
chunk_obj = json.loads(chunk.get("bytes").decode())
if provider == "cohere" and (
chunk_obj["is_finished"] or chunk_obj[output_key] == "<EOS_TOKEN>"
):
return
elif (
provider == "mistral"
and chunk_obj.get(output_key, [{}])[0].get("stop_reason", "") == "stop"
):
return
elif messages_api and (chunk_obj.get("type") == "content_block_stop"):
return
if messages_api and chunk_obj.get("type") in (
"message_start",
"content_block_start",
"content_block_delta",
):
if chunk_obj.get("type") == "content_block_delta":
chk = _stream_response_to_generation_chunk(chunk_obj)
yield chk
else:
continue
else:
# chunk obj format varies with provider
yield GenerationChunk(
text=(
chunk_obj[output_key]
if provider != "mistral"
else chunk_obj[output_key][0]["text"]
),
generation_info={
GUARDRAILS_BODY_KEY: (
chunk_obj.get(GUARDRAILS_BODY_KEY)
if GUARDRAILS_BODY_KEY in chunk_obj
else None
),
},
)
@classmethod
async def aprepare_output_stream(
cls, provider: str, response: Any, stop: Optional[List[str]] = None
) -> AsyncIterator[GenerationChunk]:
stream = response.get("body")
if not stream:
return
output_key = cls.provider_to_output_key_map.get(provider, None)
if not output_key:
raise ValueError(
f"Unknown streaming response output key for provider: {provider}"
)
for event in stream:
chunk = event.get("chunk")
if not chunk:
continue
chunk_obj = json.loads(chunk.get("bytes").decode())
if provider == "cohere" and (
chunk_obj["is_finished"] or chunk_obj[output_key] == "<EOS_TOKEN>"
):
return
if (
provider == "mistral"
and chunk_obj.get(output_key, [{}])[0].get("stop_reason", "") == "stop"
):
return
yield GenerationChunk(
text=(
chunk_obj[output_key]
if provider != "mistral"
else chunk_obj[output_key][0]["text"]
)
)
class BedrockBase(BaseModel, ABC):
"""Base class for Bedrock models."""
model_config = ConfigDict(protected_namespaces=())
client: Any = Field(exclude=True) #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = Field(default=None, exclude=True)
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
config: Any = None
"""An optional botocore.config.Config instance to pass to the client."""
provider: Optional[str] = None
"""The model provider, e.g., amazon, cohere, ai21, etc. When not supplied, provider
is extracted from the first part of the model_id e.g. 'amazon' in
'amazon.titan-text-express-v1'. This value should be provided for model ids that do
not have the provider in them, e.g., custom and provisioned models that have an ARN
associated with them."""
model_id: str
"""Id of the model to call, e.g., amazon.titan-text-express-v1, this is
equivalent to the modelId property in the list-foundation-models api. For custom and
provisioned models, an ARN value is expected."""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_url: Optional[str] = None
"""Needed if you don't want to default to us-east-1 endpoint"""
streaming: bool = False
"""Whether to stream the results."""
provider_stop_sequence_key_name_map: Mapping[str, str] = {
"anthropic": "stop_sequences",
"amazon": "stopSequences",
"ai21": "stop_sequences",
"cohere": "stop_sequences",
"mistral": "stop",
}
guardrails: Optional[Mapping[str, Any]] = {
"id": None,
"version": None,
"trace": False,
}
"""
An optional dictionary to configure guardrails for Bedrock.
This field 'guardrails' consists of two keys: 'id' and 'version',
which should be strings, but are initialized to None. It's used to
determine if specific guardrails are enabled and properly set.
Type:
Optional[Mapping[str, str]]: A mapping with 'id' and 'version' keys.
Example:
llm = Bedrock(model_id="<model_id>", client=<bedrock_client>,
model_kwargs={},
guardrails={
"id": "<guardrail_id>",
"version": "<guardrail_version>"})
To enable tracing for guardrails, set the 'trace' key to True and pass a callback handler to the
'run_manager' parameter of the 'generate', '_call' methods.
Example:
llm = Bedrock(model_id="<model_id>", client=<bedrock_client>,
model_kwargs={},
guardrails={
"id": "<guardrail_id>",
"version": "<guardrail_version>",
"trace": True},
callbacks=[BedrockAsyncCallbackHandler()])
[https://python.langchain.com/docs/modules/callbacks/] for more information on callback handlers.
class BedrockAsyncCallbackHandler(AsyncCallbackHandler):
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> Any:
reason = kwargs.get("reason")
if reason == "GUARDRAIL_INTERVENED":
...Logic to handle guardrail intervention...
""" # noqa: E501
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if values.get("client") is not None:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
values["region_name"] = get_from_dict_or_env(
values,
"region_name",
"AWS_DEFAULT_REGION",
default=session.region_name,
)
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
if values["endpoint_url"]:
client_params["endpoint_url"] = values["endpoint_url"]
if values["config"]:
client_params["config"] = values["config"]
values["client"] = session.client("bedrock-runtime", **client_params)
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except ValueError as e:
raise ValueError(f"Error raised by bedrock service: {e}")
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
f"profile name are valid. Bedrock error: {e}"
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self) -> str:
if self.provider:
return self.provider
if self.model_id.startswith("arn"):
raise ValueError(
"Model provider should be supplied when passing a model ARN as "
"model_id"
)
return self.model_id.split(".")[0]
@property
def _model_is_anthropic(self) -> bool:
return self._get_provider() == "anthropic"
@property
def _guardrails_enabled(self) -> bool:
"""
Determines if guardrails are enabled and correctly configured.
Checks if 'guardrails' is a dictionary with non-empty 'id' and 'version' keys.
Checks if 'guardrails.trace' is true.
Returns:
bool: True if guardrails are correctly configured, False otherwise.
Raises:
TypeError: If 'guardrails' lacks 'id' or 'version' keys.
"""
try:
return (
isinstance(self.guardrails, dict)
and bool(self.guardrails["id"])
and bool(self.guardrails["version"])
)
except KeyError as e:
raise TypeError(
"Guardrails must be a dictionary with 'id' and 'version' keys."
) from e
def _get_guardrails_canonical(self) -> Dict[str, Any]:
"""
The canonical way to pass in guardrails to the bedrock service
adheres to the following format:
"amazon-bedrock-guardrailDetails": {
"guardrailId": "string",
"guardrailVersion": "string"
}
"""
return {
"amazon-bedrock-guardrailDetails": {
"guardrailId": self.guardrails.get("id"), # type: ignore[union-attr]
"guardrailVersion": self.guardrails.get("version"), # type: ignore[union-attr]
}
}
def _prepare_input_and_invoke(
self,
prompt: Optional[str] = None,
system: Optional[str] = None,
messages: Optional[List[Dict]] = None,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Tuple[str, Dict[str, Any]]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
params = {**_model_kwargs, **kwargs}
if self._guardrails_enabled:
params.update(self._get_guardrails_canonical())
input_body = LLMInputOutputAdapter.prepare_input(
provider=provider,
model_kwargs=params,
prompt=prompt,
system=system,
messages=messages,
)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
request_options = {
"body": body,
"modelId": self.model_id,
"accept": accept,
"contentType": contentType,
}
if self._guardrails_enabled:
request_options["guardrail"] = "ENABLED"
if self.guardrails.get("trace"): # type: ignore[union-attr]
request_options["trace"] = "ENABLED"
try:
response = self.client.invoke_model(**request_options)
text, body, usage_info = LLMInputOutputAdapter.prepare_output(
provider, response
).values()
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
# Verify the response and raise a callback error if a Bedrock service signals
# an intervention, such as when guardrails are triggered.
services_trace = self._get_bedrock_services_signal(body) # type: ignore[arg-type]
if services_trace.get("signal") and run_manager is not None:
run_manager.on_llm_error(
Exception(
f"Error raised by bedrock service: {services_trace.get('reason')}"
),
**services_trace,
)
return text, usage_info
def _get_bedrock_services_signal(self, body: dict) -> dict:
"""
This function checks the response body for an interrupt flag or message that indicates
whether any of the Bedrock services have intervened in the processing flow. It is
primarily used to identify modifications or interruptions imposed by these services
during the request-response cycle with a Large Language Model (LLM).
""" # noqa: E501
if (
self._guardrails_enabled
and self.guardrails.get("trace") # type: ignore[union-attr]
and self._is_guardrails_intervention(body)
):
return {
"signal": True,
"reason": "GUARDRAIL_INTERVENED",
"trace": body.get(AMAZON_BEDROCK_TRACE_KEY),
}
return {
"signal": False,
"reason": None,
"trace": None,
}
def _is_guardrails_intervention(self, body: dict) -> bool:
return body.get(GUARDRAILS_BODY_KEY) == "GUARDRAIL_INTERVENED"
def _prepare_input_and_invoke_stream(
self,
prompt: Optional[str] = None,
system: Optional[str] = None,
messages: Optional[List[Dict]] = None,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
# stop sequence from _generate() overrides
# stop sequences in the class attribute
_model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop
if provider == "cohere":
_model_kwargs["stream"] = True
params = {**_model_kwargs, **kwargs}
if self._guardrails_enabled:
params.update(self._get_guardrails_canonical())
input_body = LLMInputOutputAdapter.prepare_input(
provider=provider,
prompt=prompt,
system=system,
messages=messages,
model_kwargs=params,
)
body = json.dumps(input_body)
request_options = {
"body": body,
"modelId": self.model_id,
"accept": "application/json",
"contentType": "application/json",
}
if self._guardrails_enabled:
request_options["guardrail"] = "ENABLED"
if self.guardrails.get("trace"): # type: ignore[union-attr]
request_options["trace"] = "ENABLED"
try:
response = self.client.invoke_model_with_response_stream(**request_options)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
for chunk in LLMInputOutputAdapter.prepare_output_stream(
provider, response, stop, True if messages else False
):
# verify and raise callback error if any middleware intervened
self._get_bedrock_services_signal(chunk.generation_info) # type: ignore[arg-type]
if run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _aprepare_input_and_invoke_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
_model_kwargs = self.model_kwargs or {}
provider = self._get_provider()
if stop:
if provider not in self.provider_stop_sequence_key_name_map:
raise ValueError(
f"Stop sequence key name for {provider} is not supported."
)
_model_kwargs[self.provider_stop_sequence_key_name_map.get(provider)] = stop
if provider == "cohere":
_model_kwargs["stream"] = True
params = {**_model_kwargs, **kwargs}
input_body = LLMInputOutputAdapter.prepare_input(
provider=provider, prompt=prompt, model_kwargs=params
)
body = json.dumps(input_body)
response = await asyncio.get_running_loop().run_in_executor(
None,
lambda: self.client.invoke_model_with_response_stream(
body=body,
modelId=self.model_id,
accept="application/json",
contentType="application/json",
),
)
async for chunk in LLMInputOutputAdapter.aprepare_output_stream(
provider, response, stop
):
if run_manager is not None and asyncio.iscoroutinefunction(
run_manager.on_llm_new_token
):
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
elif run_manager is not None:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) # type: ignore[unused-coroutine]
yield chunk
@deprecated(
since="0.0.34", removal="1.0", alternative_import="langchain_aws.BedrockLLM"
)
class Bedrock(LLM, BedrockBase):
"""Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from bedrock_langchain.bedrock_llm import BedrockLLM
llm = BedrockLLM(
credentials_profile_name="default",
model_id="amazon.titan-text-express-v1",
streaming=True
)
"""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
model_id = values["model_id"]
if model_id.startswith("anthropic.claude-3"):
raise ValueError(
"Claude v3 models are not supported by this LLM."
"Please use `from langchain_community.chat_models import BedrockChat` "
"instead."
)
return super().validate_environment(values)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "bedrock"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.region_name:
attributes["region_name"] = self.region_name
return attributes
model_config = ConfigDict(
extra="forbid",
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Returns:
Iterator[GenerationChunk]: Generator that yields the streamed responses.
Yields:
Iterator[GenerationChunk]: Responses from the model.
"""
return self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm.invoke("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
text, _ = self._prepare_input_and_invoke(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
return text
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncGenerator[GenerationChunk, None]:
"""Call out to Bedrock service with streaming.
Args:
prompt (str): The prompt to pass into the model
stop (Optional[List[str]], optional): Stop sequences. These will
override any stop sequences in the `model_kwargs` attribute.
Defaults to None.
run_manager (Optional[CallbackManagerForLLMRun], optional): Callback
run managers used to process the output. Defaults to None.
Yields:
AsyncGenerator[GenerationChunk, None]: Generator that asynchronously yields
the streamed responses.
"""
async for chunk in self._aprepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = await llm.ainvoke("Tell me a joke.")
"""
if not self.streaming:
raise ValueError("Streaming must be set to True for async operations. ")
chunks = [
chunk.text
async for chunk in self._astream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
)
]
return "".join(chunks)
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
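# --- Usage sketch (illustrative only, not part of the library) ---
# A minimal, hedged example of streaming from the deprecated `Bedrock` wrapper
# defined above. The model id and guardrail values are placeholders; real use
# requires AWS credentials with Bedrock access.
if __name__ == "__main__":
    llm = Bedrock(
        model_id="anthropic.claude-v2",  # placeholder model id
        streaming=True,
        guardrails={"id": "my-guardrail-id", "version": "1", "trace": True},  # placeholder
    )
    for chunk in llm.stream("Summarize what Amazon Bedrock guardrails do."):
        print(chunk, end="", flush=True)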
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/mosaicml.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class MosaicML(LLM):
"""MosaicML LLM service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import MosaicML
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
mosaic_llm = MosaicML(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "mosaic"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a MosaicML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = mosaic_llm.invoke("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {"inputs": [prompt]}
payload.update(_model_kwargs)
payload.update(kwargs)
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
output_keys = ["data", "output", "outputs"]
for key in output_keys:
if key in parsed_response:
output_item = parsed_response[key]
break
else:
raise ValueError(
f"No valid key ({', '.join(output_keys)}) in response:"
f" {parsed_response}"
)
if isinstance(output_item, list):
text = output_item[0]
else:
text = output_item
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
# Older versions of the API include the input in the output response
if text.startswith(prompt):
text = text[len(prompt) :]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
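# --- Usage sketch (illustrative only, not part of the library) ---
# A hedged example of the MosaicML wrapper above; the endpoint URL is the class
# default and the token value is a placeholder, not a real credential.
if __name__ == "__main__":
    llm = MosaicML(
        mosaicml_api_token="YOUR_MOSAICML_API_TOKEN",  # placeholder
        inject_instruction_format=True,  # wraps the prompt in the MPT instruction format above
    )
    print(llm.invoke("Explain what an instruction-tuned model is.", stop=["###"]))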
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/sparkllm.py | from __future__ import annotations
import base64
import hashlib
import hmac
import json
import logging
import queue
import threading
from datetime import datetime
from queue import Queue
from time import mktime
from typing import Any, Dict, Generator, Iterator, List, Optional
from urllib.parse import urlencode, urlparse, urlunparse
from wsgiref.handlers import format_date_time
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import Field
logger = logging.getLogger(__name__)
class SparkLLM(LLM):
"""iFlyTek Spark completion model integration.
Setup:
To use, you should set environment variables ``IFLYTEK_SPARK_APP_ID``,
``IFLYTEK_SPARK_API_KEY`` and ``IFLYTEK_SPARK_API_SECRET``.
.. code-block:: bash
export IFLYTEK_SPARK_APP_ID="your-app-id"
export IFLYTEK_SPARK_API_KEY="your-api-key"
export IFLYTEK_SPARK_API_SECRET="your-api-secret"
Key init args — completion params:
model: Optional[str]
Name of IFLYTEK SPARK model to use.
temperature: Optional[float]
Sampling temperature.
top_k: Optional[float]
Top-k sampling parameter: the number of highest-probability tokens to consider.
streaming: Optional[bool]
Whether to stream the results or not.
Key init args — client params:
app_id: Optional[str]
IFLYTEK SPARK APP ID. Automatically inferred from env var `IFLYTEK_SPARK_APP_ID` if not provided.
api_key: Optional[str]
IFLYTEK SPARK API KEY. If not passed in will be read from env var IFLYTEK_SPARK_API_KEY.
api_secret: Optional[str]
IFLYTEK SPARK API SECRET. If not passed in will be read from env var IFLYTEK_SPARK_API_SECRET.
api_url: Optional[str]
Base URL for API requests.
timeout: Optional[int]
Timeout for requests.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.llms import SparkLLM
llm = SparkLLM(
app_id="your-app-id",
api_key="your-api_key",
api_secret="your-api-secret",
# model='Spark4.0 Ultra',
# temperature=...,
# other params...
)
Invoke:
.. code-block:: python
input_text = "用50个字左右阐述,生命的意义在于"
llm.invoke(input_text)
.. code-block:: python
'生命的意义在于实现自我价值,追求内心的平静与快乐,同时为他人和社会带来正面影响。'
Stream:
.. code-block:: python
for chunk in llm.stream(input_text):
print(chunk)
.. code-block:: python
生命 | 的意义在于 | 不断探索和 | 实现个人潜能,通过 | 学习 | 、成长和对社会 | 的贡献,追求内心的满足和幸福。
Async:
.. code-block:: python
await llm.ainvoke(input_text)
# stream:
# async for chunk in llm.astream(input_text):
# print(chunk)
# batch:
# await llm.abatch([input_text])
.. code-block:: python
'生命的意义在于实现自我价值,追求内心的平静与快乐,同时为他人和社会带来正面影响。'
""" # noqa: E501
client: Any = None #: :meta private:
spark_app_id: Optional[str] = Field(default=None, alias="app_id")
"""Automatically inferred from env var `IFLYTEK_SPARK_APP_ID`
if not provided."""
spark_api_key: Optional[str] = Field(default=None, alias="api_key")
"""IFLYTEK SPARK API KEY. If not passed in will be read from
env var IFLYTEK_SPARK_API_KEY."""
spark_api_secret: Optional[str] = Field(default=None, alias="api_secret")
"""IFLYTEK SPARK API SECRET. If not passed in will be read from
env var IFLYTEK_SPARK_API_SECRET."""
spark_api_url: Optional[str] = Field(default=None, alias="api_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
spark_llm_domain: Optional[str] = Field(default=None, alias="model")
"""Model name to use."""
spark_user_id: str = "lc_user"
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = Field(default=30, alias="timeout")
"""request timeout for chat http requests"""
temperature: float = 0.5
"""What sampling temperature to use."""
top_k: int = 4
"""What search sampling control to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
values["spark_app_id"] = get_from_dict_or_env(
values,
["spark_app_id", "app_id"],
"IFLYTEK_SPARK_APP_ID",
)
values["spark_api_key"] = get_from_dict_or_env(
values,
["spark_api_key", "api_key"],
"IFLYTEK_SPARK_API_KEY",
)
values["spark_api_secret"] = get_from_dict_or_env(
values,
["spark_api_secret", "api_secret"],
"IFLYTEK_SPARK_API_SECRET",
)
values["spark_api_url"] = get_from_dict_or_env(
values,
["spark_api_url", "api_url"],
"IFLYTEK_SPARK_API_URL",
"wss://spark-api.xf-yun.com/v3.5/chat",
)
values["spark_llm_domain"] = get_from_dict_or_env(
values,
["spark_llm_domain", "model"],
"IFLYTEK_SPARK_LLM_DOMAIN",
"generalv3.5",
)
# put extra params into model_kwargs
values["model_kwargs"]["temperature"] = values["temperature"] or cls.temperature
values["model_kwargs"]["top_k"] = values["top_k"] or cls.top_k
values["client"] = _SparkLLMClient(
app_id=values["spark_app_id"],
api_key=values["spark_api_key"],
api_secret=values["spark_api_secret"],
api_url=values["spark_api_url"],
spark_domain=values["spark_llm_domain"],
model_kwargs=values["model_kwargs"],
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "spark-llm-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling SparkLLM API."""
normal_params = {
"spark_llm_domain": self.spark_llm_domain,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_k": self.top_k,
"temperature": self.temperature,
}
return {**normal_params, **self.model_kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an sparkllm for each generation with a prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the llm.
Example:
.. code-block:: python
response = client("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
completion = ""
self.client.arun(
[{"role": "user", "content": prompt}],
self.spark_user_id,
self.model_kwargs,
self.streaming,
)
for content in self.client.subscribe(timeout=self.request_timeout):
if "data" not in content:
continue
completion = content["data"]["content"]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
self.client.run(
[{"role": "user", "content": prompt}],
self.spark_user_id,
self.model_kwargs,
True,
)
for content in self.client.subscribe(timeout=self.request_timeout):
if "data" not in content:
continue
delta = content["data"]
if run_manager:
run_manager.on_llm_new_token(delta)
yield GenerationChunk(text=delta["content"])
class _SparkLLMClient:
"""
Use websocket-client to call the SparkLLM interface provided by Xfyun,
iFlyTek's open platform for AI capabilities.
"""
def __init__(
self,
app_id: str,
api_key: str,
api_secret: str,
api_url: Optional[str] = None,
spark_domain: Optional[str] = None,
model_kwargs: Optional[dict] = None,
):
try:
import websocket
self.websocket_client = websocket
except ImportError:
raise ImportError(
"Could not import websocket client python package. "
"Please install it with `pip install websocket-client`."
)
self.api_url = (
"wss://spark-api.xf-yun.com/v3.5/chat" if not api_url else api_url
)
self.app_id = app_id
self.model_kwargs = model_kwargs
self.spark_domain = spark_domain or "generalv3.5"
self.queue: Queue[Dict] = Queue()
self.blocking_message = {"content": "", "role": "assistant"}
self.api_key = api_key
self.api_secret = api_secret
@staticmethod
def _create_url(api_url: str, api_key: str, api_secret: str) -> str:
"""
Generate a request url with an api key and an api secret.
"""
# generate timestamp by RFC1123
date = format_date_time(mktime(datetime.now().timetuple()))
# urlparse
parsed_url = urlparse(api_url)
host = parsed_url.netloc
path = parsed_url.path
signature_origin = f"host: {host}\ndate: {date}\nGET {path} HTTP/1.1"
# encrypt using hmac-sha256
signature_sha = hmac.new(
api_secret.encode("utf-8"),
signature_origin.encode("utf-8"),
digestmod=hashlib.sha256,
).digest()
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8")
authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", \
headers="host date request-line", signature="{signature_sha_base64}"'
authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(
encoding="utf-8"
)
# generate url
params_dict = {"authorization": authorization, "date": date, "host": host}
encoded_params = urlencode(params_dict)
url = urlunparse(
(
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
parsed_url.params,
encoded_params,
parsed_url.fragment,
)
)
return url
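    # Illustrative note (assumption): the signed URL produced above has the form
    #   wss://spark-api.xf-yun.com/v3.5/chat?authorization=<b64>&date=<RFC1123>&host=<host>
    # where <b64> encodes the HMAC-SHA256 signature over the "host", "date" and
    # GET request-line headers, as required by the iFlyTek Spark websocket API.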
def run(
self,
messages: List[Dict],
user_id: str,
model_kwargs: Optional[dict] = None,
streaming: bool = False,
) -> None:
self.websocket_client.enableTrace(False)
ws = self.websocket_client.WebSocketApp(
_SparkLLMClient._create_url(
self.api_url,
self.api_key,
self.api_secret,
),
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open,
)
ws.messages = messages # type: ignore[attr-defined]
ws.user_id = user_id # type: ignore[attr-defined]
ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs # type: ignore[attr-defined]
ws.streaming = streaming # type: ignore[attr-defined]
ws.run_forever()
def arun(
self,
messages: List[Dict],
user_id: str,
model_kwargs: Optional[dict] = None,
streaming: bool = False,
) -> threading.Thread:
ws_thread = threading.Thread(
target=self.run,
args=(
messages,
user_id,
model_kwargs,
streaming,
),
)
ws_thread.start()
return ws_thread
def on_error(self, ws: Any, error: Optional[Any]) -> None:
self.queue.put({"error": error})
ws.close()
def on_close(self, ws: Any, close_status_code: int, close_reason: str) -> None:
logger.debug(
{
"log": {
"close_status_code": close_status_code,
"close_reason": close_reason,
}
}
)
self.queue.put({"done": True})
def on_open(self, ws: Any) -> None:
self.blocking_message = {"content": "", "role": "assistant"}
data = json.dumps(
self.gen_params(
messages=ws.messages, user_id=ws.user_id, model_kwargs=ws.model_kwargs
)
)
ws.send(data)
def on_message(self, ws: Any, message: str) -> None:
data = json.loads(message)
code = data["header"]["code"]
if code != 0:
self.queue.put(
{"error": f"Code: {code}, Error: {data['header']['message']}"}
)
ws.close()
else:
choices = data["payload"]["choices"]
status = choices["status"]
content = choices["text"][0]["content"]
if ws.streaming:
self.queue.put({"data": choices["text"][0]})
else:
self.blocking_message["content"] += content
if status == 2:
if not ws.streaming:
self.queue.put({"data": self.blocking_message})
usage_data = (
data.get("payload", {}).get("usage", {}).get("text", {})
if data
else {}
)
self.queue.put({"usage": usage_data})
ws.close()
def gen_params(
self, messages: list, user_id: str, model_kwargs: Optional[dict] = None
) -> dict:
data: Dict = {
"header": {"app_id": self.app_id, "uid": user_id},
"parameter": {"chat": {"domain": self.spark_domain}},
"payload": {"message": {"text": messages}},
}
if model_kwargs:
data["parameter"]["chat"].update(model_kwargs)
logger.debug(f"Spark Request Parameters: {data}")
return data
def subscribe(self, timeout: Optional[int] = 30) -> Generator[Dict, None, None]:
while True:
try:
content = self.queue.get(timeout=timeout)
except queue.Empty as _:
raise TimeoutError(
f"SparkLLMClient wait LLM api response timeout {timeout} seconds"
)
if "error" in content:
raise ConnectionError(content["error"])
if "usage" in content:
yield content
continue
if "done" in content:
break
if "data" not in content:
break
yield content
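# --- Usage sketch (illustrative only, not part of the library) ---
# A hedged example of streaming through the SparkLLM wrapper above; the
# credentials are placeholders and must be replaced (or exported as the
# IFLYTEK_SPARK_* environment variables).
if __name__ == "__main__":
    llm = SparkLLM(
        app_id="YOUR_APP_ID",  # placeholder
        api_key="YOUR_API_KEY",  # placeholder
        api_secret="YOUR_API_SECRET",  # placeholder
        streaming=True,
    )
    for chunk in llm.stream("Briefly introduce the iFlyTek Spark model."):
        print(chunk, end="", flush=True)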
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
logger = logging.getLogger(__name__)
class QianfanLLMEndpoint(LLM):
"""Baidu Qianfan completion model integration.
Setup:
Install ``qianfan`` and set environment variables ``QIANFAN_AK``, ``QIANFAN_SK``.
.. code-block:: bash
pip install qianfan
export QIANFAN_AK="your-api-key"
export QIANFAN_SK="your-secret_key"
Key init args — completion params:
model: str
Name of Qianfan model to use.
temperature: Optional[float]
Sampling temperature.
endpoint: Optional[str]
Endpoint of the Qianfan LLM
top_p: Optional[float]
What probability mass to use.
Key init args — client params:
timeout: Optional[int]
Timeout for requests.
api_key: Optional[str]
Qianfan API KEY. If not passed in will be read from env var QIANFAN_AK.
secret_key: Optional[str]
Qianfan SECRET KEY. If not passed in will be read from env var QIANFAN_SK.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.llms import QianfanLLMEndpoint
llm = QianfanLLMEndpoint(
model="ERNIE-3.5-8K",
# api_key="...",
# secret_key="...",
# other params...
)
Invoke:
.. code-block:: python
input_text = "用50个字左右阐述,生命的意义在于"
llm.invoke(input_text)
.. code-block:: python
'生命的意义在于体验、成长、爱与被爱、贡献与传承,以及对未知的勇敢探索与自我超越。'
Stream:
.. code-block:: python
for chunk in llm.stream(input_text):
print(chunk)
.. code-block:: python
生命的意义 | 在于不断探索 | 与成长 | ,实现 | 自我价值,| 给予爱 | 并接受 | 爱, | 在经历 | 中感悟 | ,让 | 短暂的存在 | 绽放出无限 | 的光彩 | 与温暖 | 。
.. code-block:: python
stream = llm.stream(input_text)
full = next(stream)
for chunk in stream:
full += chunk
full
.. code-block::
'生命的意义在于探索、成长、爱与被爱、贡献价值、体验世界之美,以及在有限的时间里追求内心的平和与幸福。'
Async:
.. code-block:: python
await llm.ainvoke(input_text)
# stream:
# async for chunk in llm.astream(input_text):
# print(chunk)
# batch:
# await llm.abatch([input_text])
.. code-block:: python
'生命的意义在于探索、成长、爱与被爱、贡献社会,在有限的时间里追寻无限的可能,实现自我价值,让生活充满色彩与意义。'
""" # noqa: E501
init_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""init kwargs for qianfan client init, such as `query_per_second` which is
associated with qianfan resource object to limit QPS"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra params for model invoke using with `do`."""
client: Any = None
qianfan_ak: Optional[SecretStr] = Field(default=None, alias="api_key")
qianfan_sk: Optional[SecretStr] = Field(default=None, alias="secret_key")
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
model: Optional[str] = Field(default=None)
"""Model name.
you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
preset models are mapping to an endpoint.
`model` will be ignored if `endpoint` is set
Default is set by `qianfan` SDK, not here
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
request_timeout: Optional[int] = Field(default=60, alias="timeout")
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
In the case of other model, passing these params will not affect the result.
"""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
["qianfan_ak", "api_key"],
"QIANFAN_AK",
default="",
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
["qianfan_sk", "secret_key"],
"QIANFAN_SK",
default="",
)
)
params = {
**values.get("init_kwargs", {}),
"model": values["model"],
}
if values["qianfan_ak"].get_secret_value() != "":
params["ak"] = values["qianfan_ak"].get_secret_value()
if values["qianfan_sk"].get_secret_value() != "":
params["sk"] = values["qianfan_sk"].get_secret_value()
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.Completion(**params)
except ImportError:
raise ImportError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "baidu-qianfan-endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
prompt: str,
**kwargs: Any,
) -> dict:
if "streaming" in kwargs:
kwargs["stream"] = kwargs.pop("streaming")
return {
**{"prompt": prompt, "model": self.model},
**self._default_params,
**kwargs,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model.invoke("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
params = self._convert_prompt_msg_params(prompt, **kwargs)
params["stop"] = stop
response_payload = self.client.do(**params)
return response_payload["result"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.streaming:
completion = ""
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
params = self._convert_prompt_msg_params(prompt, **kwargs)
params["stop"] = stop
response_payload = await self.client.ado(**params)
return response_payload["result"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
params["stop"] = stop
for res in self.client.do(**params):
if res:
chunk = GenerationChunk(text=res["result"])
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = self._convert_prompt_msg_params(prompt, **{**kwargs, "stream": True})
params["stop"] = stop
async for res in await self.client.ado(**params):
if res:
chunk = GenerationChunk(text=res["result"])
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
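# --- Usage sketch (illustrative only, not part of the library) ---
# A hedged example of the Qianfan completion wrapper above; the keys are
# placeholders (alternatively, set QIANFAN_AK / QIANFAN_SK in the environment).
if __name__ == "__main__":
    llm = QianfanLLMEndpoint(
        model="ERNIE-3.5-8K",
        api_key="YOUR_QIANFAN_AK",  # placeholder
        secret_key="YOUR_QIANFAN_SK",  # placeholder
        temperature=0.7,
    )
    print(llm.invoke("用一句话介绍百度千帆大模型平台。"))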
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/huggingface_endpoint.py | import json
import logging
import os
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import (
get_pydantic_field_names,
pre_init,
)
from pydantic import ConfigDict, Field, model_validator
logger = logging.getLogger(__name__)
VALID_TASKS = (
"text2text-generation",
"text-generation",
"summarization",
"conversational",
)
@deprecated(
since="0.0.37",
removal="1.0",
alternative_import="langchain_huggingface.HuggingFaceEndpoint",
)
class HuggingFaceEndpoint(LLM):
"""
HuggingFace Endpoint.
To use this class, you should have installed the ``huggingface_hub`` package, and
the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token,
or given as a named parameter to the constructor.
Example:
.. code-block:: python
# Basic Example (no streaming)
llm = HuggingFaceEndpoint(
endpoint_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
huggingfacehub_api_token="my-api-key"
)
print(llm.invoke("What is Deep Learning?"))
# Streaming response example
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callbacks = [StreamingStdOutCallbackHandler()]
llm = HuggingFaceEndpoint(
endpoint_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
callbacks=callbacks,
streaming=True,
huggingfacehub_api_token="my-api-key"
)
print(llm.invoke("What is Deep Learning?"))
""" # noqa: E501
endpoint_url: Optional[str] = None
"""Endpoint URL to use."""
repo_id: Optional[str] = None
"""Repo to use."""
huggingfacehub_api_token: Optional[str] = None
max_new_tokens: int = 512
"""Maximum number of generated tokens"""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens to keep for
top-k-filtering."""
top_p: Optional[float] = 0.95
"""If set to < 1, only the smallest set of most probable tokens with probabilities
that add up to `top_p` or higher are kept for generation."""
typical_p: Optional[float] = 0.95
"""Typical Decoding mass. See [Typical Decoding for Natural Language
Generation](https://arxiv.org/abs/2202.00666) for more information."""
temperature: Optional[float] = 0.8
"""The value used to module the logits distribution."""
repetition_penalty: Optional[float] = None
"""The parameter for repetition penalty. 1.0 means no penalty.
See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details."""
return_full_text: bool = False
"""Whether to prepend the prompt to the generated text"""
truncate: Optional[int] = None
"""Truncate inputs tokens to the given size"""
stop_sequences: List[str] = Field(default_factory=list)
"""Stop generating tokens if a member of `stop_sequences` is generated"""
seed: Optional[int] = None
"""Random sampling seed"""
inference_server_url: str = ""
"""text-generation-inference instance base url"""
timeout: int = 120
"""Timeout in seconds"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
do_sample: bool = False
"""Activate logits sampling"""
watermark: bool = False
"""Watermarking with [A Watermark for Large Language Models]
(https://arxiv.org/abs/2301.10226)"""
server_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any text-generation-inference server parameters not explicitly specified"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `call` not explicitly specified"""
model: str
client: Any = None
async_client: Any = None
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please make sure that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
if "endpoint_url" not in values and "repo_id" not in values:
raise ValueError(
"Please specify an `endpoint_url` or `repo_id` for the model."
)
if "endpoint_url" in values and "repo_id" in values:
raise ValueError(
"Please specify either an `endpoint_url` OR a `repo_id`, not both."
)
values["model"] = values.get("endpoint_url") or values.get("repo_id")
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that package is installed and that the API token is valid."""
try:
from huggingface_hub import login
except ImportError:
raise ImportError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
huggingfacehub_api_token = values["huggingfacehub_api_token"] or os.getenv(
"HUGGINGFACEHUB_API_TOKEN"
)
if huggingfacehub_api_token is not None:
try:
login(token=huggingfacehub_api_token)
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
from huggingface_hub import AsyncInferenceClient, InferenceClient
values["client"] = InferenceClient(
model=values["model"],
timeout=values["timeout"],
token=huggingfacehub_api_token,
**values["server_kwargs"],
)
values["async_client"] = AsyncInferenceClient(
model=values["model"],
timeout=values["timeout"],
token=huggingfacehub_api_token,
**values["server_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling text generation inference API."""
return {
"max_new_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"typical_p": self.typical_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"return_full_text": self.return_full_text,
"truncate": self.truncate,
"stop_sequences": self.stop_sequences,
"seed": self.seed,
"do_sample": self.do_sample,
"watermark": self.watermark,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _invocation_params(
self, runtime_stop: Optional[List[str]], **kwargs: Any
) -> Dict[str, Any]:
params = {**self._default_params, **kwargs}
params["stop_sequences"] = params["stop_sequences"] + (runtime_stop or [])
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint."""
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
for chunk in self._stream(prompt, stop, run_manager, **invocation_params):
completion += chunk.text
return completion
else:
invocation_params["stop"] = invocation_params[
"stop_sequences"
] # porting 'stop_sequences' into the 'stop' argument
response = self.client.post(
json={"inputs": prompt, "parameters": invocation_params},
stream=False,
task=self.task,
)
try:
response_text = json.loads(response.decode())[0]["generated_text"]
except KeyError:
response_text = json.loads(response.decode())["generated_text"]
# Maybe the generation has stopped at one of the stop sequences:
# then we remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt, stop, run_manager, **invocation_params
):
completion += chunk.text
return completion
else:
invocation_params["stop"] = invocation_params["stop_sequences"]
response = await self.async_client.post(
json={"inputs": prompt, "parameters": invocation_params},
stream=False,
task=self.task,
)
try:
response_text = json.loads(response.decode())[0]["generated_text"]
except KeyError:
response_text = json.loads(response.decode())["generated_text"]
# Maybe the generation has stopped at one of the stop sequences:
# then remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop_sequences"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
for response in self.client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
async for response in await self.async_client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: Optional[str] = None
for stop_seq in invocation_params["stop_sequences"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: Optional[str] = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
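# --- Usage sketch (illustrative only, not part of the library) ---
# A hedged example of the deprecated HuggingFaceEndpoint wrapper above, pointed
# at a public Hub repo rather than a dedicated endpoint; the repo id and token
# below are placeholders.
if __name__ == "__main__":
    llm = HuggingFaceEndpoint(
        repo_id="HuggingFaceH4/zephyr-7b-beta",  # placeholder repo id
        max_new_tokens=128,
        temperature=0.2,
        huggingfacehub_api_token="hf_xxx",  # placeholder token
    )
    print(llm.invoke("What is a text-generation-inference server?"))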
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/gpt4all.py | from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import pre_init
from pydantic import ConfigDict, Field
from langchain_community.llms.utils import enforce_stop_tokens
class GPT4All(LLM):
"""GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain_community.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
# Simplest invocation
response = model.invoke("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
max_tokens: int = Field(200, alias="max_tokens")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.7
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.1
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.18
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(8, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
device: Optional[str] = Field("cpu", alias="device")
"""Device name: cpu, gpu, nvidia, intel, amd or DeviceName."""
client: Any = None #: :meta private:
model_config = ConfigDict(
extra="forbid",
)
@staticmethod
def _model_param_names() -> Set[str]:
return {
"max_tokens",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
"streaming",
}
def _default_params(self) -> Dict[str, Any]:
return {
"max_tokens": self.max_tokens,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
"streaming": self.streaming,
}
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
device=values["device"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
try:
values["backend"] = values["client"].model_type
except AttributeError:
# The below is for compatibility with GPT4All Python bindings <= 0.2.3.
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model.invoke(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
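# --- Usage sketch (illustrative only, not part of the library) ---
# A hedged example of the GPT4All wrapper above; the model path is a
# placeholder and must point at a local model file (or set allow_download=True
# to fetch one into ~/.cache/gpt4all/).
if __name__ == "__main__":
    llm = GPT4All(
        model="./models/gpt4all-model.bin",  # placeholder path
        max_tokens=512,
        n_threads=8,
    )
    print(llm.invoke("Once upon a time, ", stop=["\n\n"]))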
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/promptlayer_openai.py | import datetime
from typing import Any, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.outputs import LLMResult
from langchain_community.llms.openai import OpenAI, OpenAIChat
class PromptLayerOpenAI(OpenAI):
"""PromptLayer OpenAI large language models.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerOpenAI LLM adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain_community.llms import PromptLayerOpenAI
openai = PromptLayerOpenAI(model_name="gpt-3.5-turbo-instruct")
"""
pl_tags: Optional[List[str]] = None
return_pl_id: Optional[bool] = False
@classmethod
def is_lc_serializable(cls) -> bool:
return False
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerOpenAI",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAI.async",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
class PromptLayerOpenAIChat(OpenAIChat):
"""PromptLayer OpenAI large language models.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAIChat LLM can also
be passed here. The PromptLayerOpenAIChat adds two optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain_community.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]] = None
return_pl_id: Optional[bool] = False
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerOpenAIChat",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {
"text": generation.text,
"llm_output": generated_responses.llm_output,
}
params = {**self._identifying_params, **kwargs}
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAIChat.async",
"langchain",
[prompt],
params,
self.pl_tags,
resp,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
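# --- Usage sketch (editor's addition, not part of the upstream module) ---
# A minimal, hedged example of how PromptLayerOpenAIChat might be invoked. It
# assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set in the environment and
# that the "gpt-3.5-turbo" model name is still served; adjust both as needed.
if __name__ == "__main__":
    chat = PromptLayerOpenAIChat(
        model_name="gpt-3.5-turbo",
        pl_tags=["example"],
        return_pl_id=True,
    )
    result = chat.generate(["Say hello in one short sentence."])
    print(result.generations[0][0].text)
    # With return_pl_id=True the PromptLayer request id is stored here:
    print(result.generations[0][0].generation_info)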
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/together.py | """Wrapper around Together AI's Completion API."""
import logging
from typing import Any, Dict, List, Optional
from aiohttp import ClientSession
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import ConfigDict, SecretStr, model_validator
from langchain_community.utilities.requests import Requests
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.12", removal="1.0", alternative_import="langchain_together.Together"
)
class Together(LLM):
"""LLM models from `Together`.
To use, you'll need an API key which you can find here:
https://api.together.xyz/settings/api-keys. This can be passed in as init param
``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.
Together AI API reference: https://docs.together.ai/reference/inference
"""
base_url: str = "https://api.together.xyz/inference"
"""Base inference API URL."""
together_api_key: SecretStr
"""Together AI API key. Get it here: https://api.together.xyz/settings/api-keys"""
model: str
"""Model name. Available models listed here:
https://docs.together.ai/docs/inference-models
"""
temperature: Optional[float] = None
"""Model temperature."""
top_p: Optional[float] = None
"""Used to dynamically adjust the number of choices for each predicted token based
on the cumulative probabilities. A value of 1 will always yield the same
output. A temperature less than 1 favors more correctness and is appropriate
for question answering or summarization. A value greater than 1 introduces more
randomness in the output.
"""
top_k: Optional[int] = None
"""Used to limit the number of choices for the next predicted word or token. It
specifies the maximum number of tokens to consider at each step, based on their
probability of occurrence. This technique helps to speed up the generation
process and can improve the quality of the generated text by focusing on the
most likely options.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens to generate."""
repetition_penalty: Optional[float] = None
"""A number that controls the diversity of generated text by reducing the
likelihood of repeated sequences. Higher values decrease repetition.
"""
logprobs: Optional[int] = None
"""An integer that specifies how many top token log probabilities are included in
the response for each token generation step.
"""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
values["together_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "together_api_key", "TOGETHER_API_KEY")
)
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "together"
def _format_output(self, output: dict) -> str:
return output["output"]["choices"][0]["text"]
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
@property
def default_params(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_tokens": self.max_tokens,
"repetition_penalty": self.repetition_penalty,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Together's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
            The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
request = Requests(headers=headers)
response = request.post(url=self.base_url, data=payload)
if response.status_code >= 500:
raise Exception(f"Together Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"Together received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"Together returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
if data.get("status") != "finished":
err_msg = data.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(data)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call Together model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
"""
headers = {
"Authorization": f"Bearer {self.together_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
stop_to_use = stop[0] if stop and len(stop) == 1 else stop
payload: Dict[str, Any] = {
**self.default_params,
"prompt": prompt,
"stop": stop_to_use,
**kwargs,
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
async with ClientSession() as session:
async with session.post(
self.base_url, json=payload, headers=headers
) as response:
if response.status >= 500:
raise Exception(f"Together Server: Error {response.status}")
                elif response.status >= 400:
                    text = await response.text()
                    raise ValueError(
                        f"Together received an invalid payload: {text}"
                    )
                elif response.status != 200:
                    text = await response.text()
                    raise Exception(
                        f"Together returned an unexpected response with status "
                        f"{response.status}: {text}"
                    )
response_json = await response.json()
if response_json.get("status") != "finished":
err_msg = response_json.get("error", "Undefined Error")
raise Exception(err_msg)
output = self._format_output(response_json)
return output
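# --- Usage sketch (editor's addition, not part of the upstream module) ---
# Illustrates a plain call through the Runnable interface. It assumes
# TOGETHER_API_KEY is set and that the model id below is one Together still
# serves; both are placeholders rather than guaranteed values.
if __name__ == "__main__":
    llm = Together(model="mistralai/Mixtral-8x7B-Instruct-v0.1", max_tokens=64)
    print(llm.invoke("Write one sentence about molecules.", stop=["\n\n"]))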
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/ai21.py | from typing import Any, Dict, List, Optional, cast
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, SecretStr
class AI21PenaltyData(BaseModel):
"""Parameters for AI21 penalty data."""
scale: int = 0
applyToWhitespaces: bool = True
applyToPunctuations: bool = True
applyToNumbers: bool = True
applyToStopwords: bool = True
applyToEmojis: bool = True
class AI21(LLM):
"""AI21 large language models.
To use, you should have the environment variable ``AI21_API_KEY``
set with your API key or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import AI21
ai21 = AI21(ai21_api_key="my-api-key", model="j2-jumbo-instruct")
"""
model: str = "j2-jumbo-instruct"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
maxTokens: int = 256
"""The maximum number of tokens to generate in the completion."""
minTokens: int = 0
"""The minimum number of tokens to generate in the completion."""
topP: float = 1.0
"""Total probability mass of tokens to consider at each step."""
presencePenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens."""
countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count."""
frequencyPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to frequency."""
numResults: int = 1
"""How many completions to generate for each prompt."""
logitBias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
ai21_api_key: Optional[SecretStr] = None
stop: Optional[List[str]] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
ai21_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY")
)
values["ai21_api_key"] = ai21_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"temperature": self.temperature,
"maxTokens": self.maxTokens,
"minTokens": self.minTokens,
"topP": self.topP,
"presencePenalty": self.presencePenalty.dict(),
"countPenalty": self.countPenalty.dict(),
"frequencyPenalty": self.frequencyPenalty.dict(),
"numResults": self.numResults,
"logitBias": self.logitBias,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ai21"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to AI21's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ai21("Tell me a joke.")
"""
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
if self.base_url is not None:
base_url = self.base_url
else:
if self.model in ("j1-grande-instruct",):
base_url = "https://api.ai21.com/studio/v1/experimental"
else:
base_url = "https://api.ai21.com/studio/v1"
params = {**self._default_params, **kwargs}
self.ai21_api_key = cast(SecretStr, self.ai21_api_key)
response = requests.post(
url=f"{base_url}/{self.model}/complete",
headers={"Authorization": f"Bearer {self.ai21_api_key.get_secret_value()}"},
json={"prompt": prompt, "stopSequences": stop, **params},
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"AI21 /complete call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
response_json = response.json()
return response_json["completions"][0]["data"]["text"]
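# --- Usage sketch (editor's addition, not part of the upstream module) ---
# Assumes AI21_API_KEY is set in the environment (or pass ai21_api_key=...) and
# that the default "j2-jumbo-instruct" model is still reachable.
if __name__ == "__main__":
    llm = AI21(maxTokens=64, temperature=0.5)
    print(llm.invoke("Tell me a one-line joke."))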
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/watsonxllm.py | import logging
import os
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.18", removal="1.0", alternative_import="langchain_ibm.WatsonxLLM"
)
class WatsonxLLM(BaseLLM):
"""
IBM watsonx.ai large language models.
To use, you should have ``ibm_watsonx_ai`` python package installed,
and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
parameters = {
GenTextParamsMetaNames.DECODING_METHOD: "sample",
GenTextParamsMetaNames.MAX_NEW_TOKENS: 100,
GenTextParamsMetaNames.MIN_NEW_TOKENS: 1,
GenTextParamsMetaNames.TEMPERATURE: 0.5,
GenTextParamsMetaNames.TOP_K: 50,
GenTextParamsMetaNames.TOP_P: 1,
}
from langchain_community.llms import WatsonxLLM
watsonx_llm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
apikey="*****",
project_id="*****",
params=parameters,
)
"""
model_id: str = ""
"""Type of model to use."""
deployment_id: str = ""
"""Type of deployed model to use."""
project_id: str = ""
"""ID of the Watson Studio project."""
space_id: str = ""
"""ID of the Watson Studio space."""
url: Optional[SecretStr] = None
"""Url to Watson Machine Learning instance"""
apikey: Optional[SecretStr] = None
"""Apikey to Watson Machine Learning instance"""
token: Optional[SecretStr] = None
"""Token to Watson Machine Learning instance"""
password: Optional[SecretStr] = None
"""Password to Watson Machine Learning instance"""
username: Optional[SecretStr] = None
"""Username to Watson Machine Learning instance"""
instance_id: Optional[SecretStr] = None
"""Instance_id of Watson Machine Learning instance"""
version: Optional[SecretStr] = None
"""Version of Watson Machine Learning instance"""
params: Optional[dict] = None
"""Model parameters to use during generate requests."""
verify: Union[str, bool] = ""
"""User can pass as verify one of following:
the path to a CA_BUNDLE file
the path of directory with certificates of trusted CAs
True - default path to truststore will be taken
False - no verification will be made"""
streaming: bool = False
""" Whether to stream the results or not. """
watsonx_model: Any = None
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"url": "WATSONX_URL",
"apikey": "WATSONX_APIKEY",
"token": "WATSONX_TOKEN",
"password": "WATSONX_PASSWORD",
"username": "WATSONX_USERNAME",
"instance_id": "WATSONX_INSTANCE_ID",
}
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that credentials and python package exists in environment."""
values["url"] = convert_to_secret_str(
get_from_dict_or_env(values, "url", "WATSONX_URL")
)
if "cloud.ibm.com" in values.get("url", "").get_secret_value():
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
else:
if (
not values["token"]
and "WATSONX_TOKEN" not in os.environ
and not values["password"]
and "WATSONX_PASSWORD" not in os.environ
and not values["apikey"]
and "WATSONX_APIKEY" not in os.environ
):
raise ValueError(
"Did not find 'token', 'password' or 'apikey',"
" please add an environment variable"
" `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' "
"which contains it,"
" or pass 'token', 'password' or 'apikey'"
" as a named parameter."
)
elif values["token"] or "WATSONX_TOKEN" in os.environ:
values["token"] = convert_to_secret_str(
get_from_dict_or_env(values, "token", "WATSONX_TOKEN")
)
elif values["password"] or "WATSONX_PASSWORD" in os.environ:
values["password"] = convert_to_secret_str(
get_from_dict_or_env(values, "password", "WATSONX_PASSWORD")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
elif values["apikey"] or "WATSONX_APIKEY" in os.environ:
values["apikey"] = convert_to_secret_str(
get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
)
values["username"] = convert_to_secret_str(
get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
)
if not values["instance_id"] or "WATSONX_INSTANCE_ID" not in os.environ:
values["instance_id"] = convert_to_secret_str(
get_from_dict_or_env(values, "instance_id", "WATSONX_INSTANCE_ID")
)
try:
from ibm_watsonx_ai.foundation_models import ModelInference
credentials = {
"url": values["url"].get_secret_value() if values["url"] else None,
"apikey": (
values["apikey"].get_secret_value() if values["apikey"] else None
),
"token": (
values["token"].get_secret_value() if values["token"] else None
),
"password": (
values["password"].get_secret_value()
if values["password"]
else None
),
"username": (
values["username"].get_secret_value()
if values["username"]
else None
),
"instance_id": (
values["instance_id"].get_secret_value()
if values["instance_id"]
else None
),
"version": (
values["version"].get_secret_value() if values["version"] else None
),
}
credentials_without_none_value = {
key: value for key, value in credentials.items() if value is not None
}
watsonx_model = ModelInference(
model_id=values["model_id"],
deployment_id=values["deployment_id"],
credentials=credentials_without_none_value,
params=values["params"],
project_id=values["project_id"],
space_id=values["space_id"],
verify=values["verify"],
)
values["watsonx_model"] = watsonx_model
except ImportError:
raise ImportError(
"Could not import ibm_watsonx_ai python package. "
"Please install it with `pip install ibm_watsonx_ai`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"deployment_id": self.deployment_id,
"params": self.params,
"project_id": self.project_id,
"space_id": self.space_id,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "IBM watsonx.ai"
@staticmethod
def _extract_token_usage(
response: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
if response is None:
return {"generated_token_count": 0, "input_token_count": 0}
input_token_count = 0
generated_token_count = 0
def get_count_value(key: str, result: Dict[str, Any]) -> int:
return result.get(key, 0) or 0
for res in response:
results = res.get("results")
if results:
input_token_count += get_count_value("input_token_count", results[0])
generated_token_count += get_count_value(
"generated_token_count", results[0]
)
return {
"generated_token_count": generated_token_count,
"input_token_count": input_token_count,
}
def _get_chat_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
params: Dict[str, Any] = {**self.params} if self.params else {}
if stop is not None:
params["stop_sequences"] = stop
return params
def _create_llm_result(self, response: List[dict]) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for res in response:
results = res.get("results")
if results:
finish_reason = results[0].get("stop_reason")
gen = Generation(
text=results[0].get("generated_text"),
generation_info={"finish_reason": finish_reason},
)
generations.append([gen])
final_token_usage = self._extract_token_usage(response)
llm_output = {
"token_usage": final_token_usage,
"model_id": self.model_id,
"deployment_id": self.deployment_id,
}
return LLMResult(generations=generations, llm_output=llm_output)
def _stream_response_to_generation_chunk(
self,
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["results"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["results"][0]["generated_text"],
generation_info=dict(
finish_reason=stream_response["results"][0].get("stop_reason", None),
llm_output={
"model_id": self.model_id,
"deployment_id": self.deployment_id,
},
),
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the IBM watsonx.ai inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = watsonx_llm.invoke("What is a molecule")
"""
result = self._generate(
prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs
)
return result.generations[0][0].text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> LLMResult:
"""Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonx_llm.generate(["What is a molecule"])
"""
params = self._get_chat_params(stop=stop)
should_stream = stream if stream is not None else self.streaming
if should_stream:
if len(prompts) > 1:
raise ValueError(
f"WatsonxLLM currently only supports single prompt, got {prompts}"
)
generation = GenerationChunk(text="")
stream_iter = self._stream(
prompts[0], stop=stop, run_manager=run_manager, **kwargs
)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
if isinstance(generation.generation_info, dict):
llm_output = generation.generation_info.pop("llm_output")
return LLMResult(generations=[[generation]], llm_output=llm_output)
return LLMResult(generations=[[generation]])
else:
response = self.watsonx_model.generate(prompt=prompts, params=params)
return self._create_llm_result(response)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Call the IBM watsonx.ai inference endpoint which then streams the response.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The iterator which yields generation chunks.
Example:
.. code-block:: python
response = watsonx_llm.stream("What is a molecule")
for chunk in response:
print(chunk, end='') # noqa: T201
"""
params = self._get_chat_params(stop=stop)
for stream_resp in self.watsonx_model.generate_text_stream(
prompt=prompt, raw_response=True, params=params
):
chunk = self._stream_response_to_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
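# --- Usage sketch (editor's addition, not part of the upstream module) ---
# The url, project id, and parameter names below are placeholders; the sketch
# assumes WATSONX_APIKEY is set in the environment and a valid project exists.
if __name__ == "__main__":
    watsonx_llm = WatsonxLLM(
        model_id="google/flan-ul2",
        url="https://us-south.ml.cloud.ibm.com",
        project_id="YOUR_PROJECT_ID",
        params={"decoding_method": "sample", "max_new_tokens": 50},
    )
    print(watsonx_llm.invoke("What is a molecule?"))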
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/deepinfra.py | import json
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional
import aiohttp
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
from langchain_community.utilities.requests import Requests
DEFAULT_MODEL_ID = "meta-llama/Meta-Llama-3-70B-Instruct"
class DeepInfra(LLM):
"""DeepInfra models.
To use, you should have the environment variable ``DEEPINFRA_API_TOKEN``
set with your API token, or pass it as a named parameter to the
constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain_community.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[Dict] = None
deepinfra_api_token: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _url(self) -> str:
return f"https://api.deepinfra.com/v1/inference/{self.model_id}"
def _headers(self) -> Dict:
return {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
def _body(self, prompt: str, kwargs: Any) -> Dict:
model_kwargs = self.model_kwargs or {}
model_kwargs = {**model_kwargs, **kwargs}
return {
"input": prompt,
**model_kwargs,
}
def _handle_status(self, code: int, text: Any) -> None:
if code >= 500:
raise Exception(f"DeepInfra Server: Error {text}")
elif code == 401:
raise Exception("DeepInfra Server: Unauthorized")
elif code == 403:
raise Exception("DeepInfra Server: Unauthorized")
elif code == 404:
raise Exception(f"DeepInfra Server: Model not found {self.model_id}")
elif code == 429:
raise Exception("DeepInfra Server: Rate limit exceeded")
elif code >= 400:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
f"DeepInfra returned an unexpected response with status "
f"{code}: {text}"
)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, kwargs))
self._handle_status(response.status_code, response.text)
data = response.json()
return data["results"][0]["generated_text"]
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, kwargs)
) as response:
            self._handle_status(response.status, await response.text())
data = await response.json()
return data["results"][0]["generated_text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
request = Requests(headers=self._headers())
response = request.post(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
)
response_text = response.text
self._handle_body_errors(response_text)
self._handle_status(response.status_code, response.text)
for line in _parse_stream(response.iter_lines()):
chunk = _handle_sse_line(line)
if chunk:
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
request = Requests(headers=self._headers())
async with request.apost(
url=self._url(), data=self._body(prompt, {**kwargs, "stream": True})
) as response:
response_text = await response.text()
self._handle_body_errors(response_text)
            self._handle_status(response.status, response_text)
async for line in _parse_stream_async(response.content):
chunk = _handle_sse_line(line)
if chunk:
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
def _handle_body_errors(self, body: str) -> None:
"""
Example error response:
data: {"error_type": "validation_error",
"error_message": "ConnectionError: ..."}
"""
if "error" in body:
try:
# Remove data: prefix if present
if body.startswith("data:"):
body = body[len("data:") :]
error_data = json.loads(body)
error_message = error_data.get("error_message", "Unknown error")
raise Exception(f"DeepInfra Server Error: {error_message}")
except json.JSONDecodeError:
raise Exception(f"DeepInfra Server: {body}")
def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
async def _parse_stream_async(rbody: aiohttp.StreamReader) -> AsyncIterator[str]:
async for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
def _parse_stream_helper(line: bytes) -> Optional[str]:
if line and line.startswith(b"data:"):
if line.startswith(b"data: "):
            # SSE event may be valid when it contains whitespace
line = line[len(b"data: ") :]
else:
line = line[len(b"data:") :]
if line.strip() == b"[DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
else:
return line.decode("utf-8")
return None
def _handle_sse_line(line: str) -> Optional[GenerationChunk]:
try:
obj = json.loads(line)
return GenerationChunk(
text=obj.get("token", {}).get("text"),
)
except Exception:
return None
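# --- Usage sketch (editor's addition, not part of the upstream module) ---
# Streams tokens from the default Llama-3 deployment. Assumes
# DEEPINFRA_API_TOKEN is set; the model_kwargs below are illustrative only.
if __name__ == "__main__":
    llm = DeepInfra(model_kwargs={"max_new_tokens": 64, "temperature": 0.7})
    for chunk in llm.stream("Name three uses of graphene."):
        print(chunk, end="", flush=True)
    print()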
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/sagemaker_endpoint.py | """Sagemaker InvokeEndpoint API."""
import io
import json
from abc import abstractmethod
from typing import Any, Dict, Generic, Iterator, List, Mapping, Optional, TypeVar, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import pre_init
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]])
OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]], Iterator])
class LineIterator:
"""Parse the byte stream input.
The output of the model will be in the following format:
b'{"outputs": [" a"]}\n'
b'{"outputs": [" challenging"]}\n'
b'{"outputs": [" problem"]}\n'
...
While usually each PayloadPart event from the event stream will
contain a byte array with a full json, this is not guaranteed
    and some of the json objects may be split across PayloadPart events.
For example:
{'PayloadPart': {'Bytes': b'{"outputs": '}}
{'PayloadPart': {'Bytes': b'[" problem"]}\n'}}
This class accounts for this by concatenating bytes written via the 'write' function
and then exposing a method which will return lines (ending with a '\n' character)
within the buffer via the 'scan_lines' function.
It maintains the position of the last read position to ensure
that previous bytes are not exposed again.
For more details see:
https://aws.amazon.com/blogs/machine-learning/elevating-the-generative-ai-experience-introducing-streaming-support-in-amazon-sagemaker-hosting/
"""
def __init__(self, stream: Any) -> None:
self.byte_iterator = iter(stream)
self.buffer = io.BytesIO()
self.read_pos = 0
def __iter__(self) -> "LineIterator":
return self
def __next__(self) -> Any:
while True:
self.buffer.seek(self.read_pos)
line = self.buffer.readline()
if line and line[-1] == ord("\n"):
self.read_pos += len(line)
return line[:-1]
try:
chunk = next(self.byte_iterator)
except StopIteration:
if self.read_pos < self.buffer.getbuffer().nbytes:
continue
raise
if "PayloadPart" not in chunk:
# Unknown Event Type
continue
self.buffer.seek(0, io.SEEK_END)
self.buffer.write(chunk["PayloadPart"]["Bytes"])
class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
"""Handler class to transform input from LLM to a
format that SageMaker endpoint expects.
Similarly, the class handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> OUTPUT_TYPE:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class LLMContentHandler(ContentHandlerBase[str, str]):
"""Content handler for LLM class."""
class SagemakerEndpoint(LLM):
"""Sagemaker Inference Endpoint models.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Args:
region_name: The aws region e.g., `us-west-2`.
Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
client: boto3 client for Sagemaker Endpoint
content_handler: Implementation for model specific LLMContentHandler
Example:
.. code-block:: python
from langchain_community.llms import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
#Use with boto3 client
client = boto3.client(
"sagemaker-runtime",
region_name=region_name
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
client=client
)
"""
client: Any = None
"""Boto3 client for sagemaker runtime"""
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: LLMContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
streaming: bool = False
"""Whether to stream the results."""
"""
Example:
.. code-block:: python
from langchain_community.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
model_config = ConfigDict(
extra="forbid",
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Dont do anything if client provided externally"""
if values.get("client") is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
if self.streaming and run_manager:
try:
resp = self.client.invoke_endpoint_with_response_stream(
EndpointName=self.endpoint_name,
Body=body,
ContentType=self.content_handler.content_type,
**_endpoint_kwargs,
)
iterator = LineIterator(resp["Body"])
current_completion: str = ""
for line in iterator:
resp = json.loads(line)
resp_output = resp.get("outputs")[0]
if stop is not None:
# Uses same approach as below
resp_output = enforce_stop_tokens(resp_output, stop)
current_completion += resp_output
run_manager.on_llm_new_token(resp_output)
return current_completion
except Exception as e:
raise ValueError(f"Error raised by streaming inference endpoint: {e}")
else:
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
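# --- Usage sketch (editor's addition, not part of the upstream module) ---
# Shows one way a concrete LLMContentHandler could look. The endpoint name,
# region, and JSON schema are placeholders; they assume a text-generation
# endpoint that accepts {"inputs": ...} payloads and returns a list of
# {"generated_text": ...} objects.
class _ExampleContentHandler(LLMContentHandler):
    content_type = "application/json"
    accepts = "application/json"
    def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
        return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode(
            "utf-8"
        )
    def transform_output(self, output: bytes) -> str:
        # At runtime `output` is a botocore StreamingBody despite the annotation.
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json[0]["generated_text"]
if __name__ == "__main__":
    se = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",
        region_name="us-west-2",
        content_handler=_ExampleContentHandler(),
        model_kwargs={"temperature": 0.7},
    )
    print(se.invoke("Tell me a joke."))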
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/friendli.py | from __future__ import annotations
import os
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.load.serializable import Serializable
from langchain_core.outputs import GenerationChunk, LLMResult
from langchain_core.utils import pre_init
from langchain_core.utils.env import get_from_dict_or_env
from langchain_core.utils.utils import convert_to_secret_str
from pydantic import Field, SecretStr
def _stream_response_to_generation_chunk(stream_response: Any) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if stream_response.event == "token_sampled":
return GenerationChunk(
text=stream_response.text,
generation_info={"token": str(stream_response.token)},
)
return GenerationChunk(text="")
class BaseFriendli(Serializable):
"""Base class of Friendli."""
# Friendli client.
client: Any = Field(default=None, exclude=True)
# Friendli Async client.
async_client: Any = Field(default=None, exclude=True)
# Model name to use.
model: str = "mixtral-8x7b-instruct-v0-1"
# Friendli personal access token to run as.
friendli_token: Optional[SecretStr] = None
# Friendli team ID to run as.
friendli_team: Optional[str] = None
# Whether to enable streaming mode.
streaming: bool = False
    # Number between -2.0 and 2.0. Positive values penalize tokens that have been
    # sampled, taking into account their frequency in the preceding text. This
    # penalization diminishes the model's tendency to reproduce identical lines
    # verbatim.
    frequency_penalty: Optional[float] = None
    # Number between -2.0 and 2.0. Positive values penalize tokens that have been
    # sampled at least once in the existing text.
presence_penalty: Optional[float] = None
# The maximum number of tokens to generate. The length of your input tokens plus
# `max_tokens` should not exceed the model's maximum length (e.g., 2048 for OpenAI
# GPT-3)
max_tokens: Optional[int] = None
# When one of the stop phrases appears in the generation result, the API will stop
# generation. The phrase is included in the generated result. If you are using
# beam search, all of the active beams should contain the stop phrase to terminate
# generation. Before checking whether a stop phrase is included in the result, the
# phrase is converted into tokens.
stop: Optional[List[str]] = None
# Sampling temperature. Smaller temperature makes the generation result closer to
# greedy, argmax (i.e., `top_k = 1`) sampling. If it is `None`, then 1.0 is used.
temperature: Optional[float] = None
# Tokens comprising the top `top_p` probability mass are kept for sampling. Numbers
# between 0.0 (exclusive) and 1.0 (inclusive) are allowed. If it is `None`, then 1.0
# is used by default.
top_p: Optional[float] = None
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate if personal access token is provided in environment."""
try:
import friendli
except ImportError as e:
raise ImportError(
"Could not import friendli-client python package. "
"Please install it with `pip install friendli-client`."
) from e
friendli_token = convert_to_secret_str(
get_from_dict_or_env(values, "friendli_token", "FRIENDLI_TOKEN")
)
values["friendli_token"] = friendli_token
friendli_token_str = friendli_token.get_secret_value()
friendli_team = values["friendli_team"] or os.getenv("FRIENDLI_TEAM")
values["friendli_team"] = friendli_team
values["client"] = values["client"] or friendli.Friendli(
token=friendli_token_str, team_id=friendli_team
)
values["async_client"] = values["async_client"] or friendli.AsyncFriendli(
token=friendli_token_str, team_id=friendli_team
)
return values
class Friendli(LLM, BaseFriendli):
"""Friendli LLM.
``friendli-client`` package should be installed with `pip install friendli-client`.
You must set ``FRIENDLI_TOKEN`` environment variable or provide the value of your
personal access token for the ``friendli_token`` argument.
Example:
.. code-block:: python
from langchain_community.llms import Friendli
friendli = Friendli(
model="mixtral-8x7b-instruct-v0-1", friendli_token="YOUR FRIENDLI TOKEN"
)
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"friendli_token": "FRIENDLI_TOKEN"}
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Friendli completions API."""
return {
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"max_tokens": self.max_tokens,
"stop": self.stop,
"temperature": self.temperature,
"top_p": self.top_p,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {"model": self.model, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "friendli"
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop"] = self.stop
else:
params["stop"] = stop
return {**params, **kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out Friendli's completions API.
Args:
prompt (str): The text prompt to generate completion for.
stop (Optional[List[str]], optional): When one of the stop phrases appears
in the generation result, the API will stop generation. The stop phrases
are excluded from the result. If beam search is enabled, all of the
active beams should contain the stop phrase to terminate generation.
Before checking whether a stop phrase is included in the result, the
phrase is converted into tokens. We recommend using stop_tokens because
it is clearer. For example, after tokenization, phrases "clear" and
" clear" can result in different token sequences due to the prepended
space character. Defaults to None.
Returns:
str: The generated text output.
Example:
.. code-block:: python
                response = friendli("Give me a recipe for the Old Fashioned cocktail.")
"""
params = self._get_invocation_params(stop=stop, **kwargs)
completion = self.client.completions.create(
model=self.model, prompt=prompt, stream=False, **params
)
return completion.choices[0].text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out Friendli's completions API Asynchronously.
Args:
prompt (str): The text prompt to generate completion for.
stop (Optional[List[str]], optional): When one of the stop phrases appears
in the generation result, the API will stop generation. The stop phrases
are excluded from the result. If beam search is enabled, all of the
active beams should contain the stop phrase to terminate generation.
Before checking whether a stop phrase is included in the result, the
phrase is converted into tokens. We recommend using stop_tokens because
it is clearer. For example, after tokenization, phrases "clear" and
" clear" can result in different token sequences due to the prepended
space character. Defaults to None.
Returns:
str: The generated text output.
Example:
.. code-block:: python
                response = await friendli("Tell me a joke.")
"""
params = self._get_invocation_params(stop=stop, **kwargs)
completion = await self.async_client.completions.create(
model=self.model, prompt=prompt, stream=False, **params
)
return completion.choices[0].text
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = self._get_invocation_params(stop=stop, **kwargs)
stream = self.client.completions.create(
model=self.model, prompt=prompt, stream=True, **params
)
for line in stream:
chunk = _stream_response_to_generation_chunk(line)
if run_manager:
run_manager.on_llm_new_token(line.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = self._get_invocation_params(stop=stop, **kwargs)
stream = await self.async_client.completions.create(
model=self.model, prompt=prompt, stream=True, **params
)
async for line in stream:
chunk = _stream_response_to_generation_chunk(line)
if run_manager:
await run_manager.on_llm_new_token(line.text, chunk=chunk)
yield chunk
def _generate(
self,
prompts: list[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out Friendli's completions API with k unique prompts.
Args:
            prompts (list[str]): The text prompts to generate completions for.
stop (Optional[List[str]], optional): When one of the stop phrases appears
in the generation result, the API will stop generation. The stop phrases
are excluded from the result. If beam search is enabled, all of the
active beams should contain the stop phrase to terminate generation.
Before checking whether a stop phrase is included in the result, the
phrase is converted into tokens. We recommend using stop_tokens because
it is clearer. For example, after tokenization, phrases "clear" and
" clear" can result in different token sequences due to the prepended
space character. Defaults to None.
Returns:
str: The generated text output.
Example:
.. code-block:: python
                response = friendli.generate(["Tell me a joke."])
"""
llm_output = {"model": self.model}
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]], llm_output=llm_output)
llm_result = super()._generate(prompts, stop, run_manager, **kwargs)
llm_result.llm_output = llm_output
return llm_result
async def _agenerate(
self,
prompts: list[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out Friendli's completions API asynchronously with k unique prompts.
Args:
            prompts (list[str]): The text prompts to generate completions for.
stop (Optional[List[str]], optional): When one of the stop phrases appears
in the generation result, the API will stop generation. The stop phrases
are excluded from the result. If beam search is enabled, all of the
active beams should contain the stop phrase to terminate generation.
Before checking whether a stop phrase is included in the result, the
phrase is converted into tokens. We recommend using stop_tokens because
it is clearer. For example, after tokenization, phrases "clear" and
" clear" can result in different token sequences due to the prepended
space character. Defaults to None.
Returns:
str: The generated text output.
Example:
.. code-block:: python
                response = await friendli.agenerate(
["Give me a recipe for the Old Fashioned cocktail."]
)
"""
llm_output = {"model": self.model}
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]], llm_output=llm_output)
llm_result = await super()._agenerate(prompts, stop, run_manager, **kwargs)
llm_result.llm_output = llm_output
return llm_result
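# --- Usage sketch (editor's addition, not part of the upstream module) ---
# Assumes FRIENDLI_TOKEN is set and that the default serverless model name is
# still available; shows a plain call followed by token streaming.
if __name__ == "__main__":
    friendli_llm = Friendli(model="mixtral-8x7b-instruct-v0-1", max_tokens=64)
    print(friendli_llm.invoke("Give me a recipe for the Old Fashioned cocktail."))
    for token in friendli_llm.stream("Count from one to five."):
        print(token, end="", flush=True)
    print()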
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/self_hosted_hugging_face.py | import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from pydantic import ConfigDict
from langchain_community.llms.self_hosted import SelfHostedPipeline
from langchain_community.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ImportError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain_community.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
    Example passing a fn that generates a pipeline (because it is not serializable):
.. code-block:: python
from langchain_community.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
hardware: Any = None
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
model_config = ConfigDict(
extra="forbid",
)
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
        The load function must be importable so it can be sent to and run on the
        server, i.e. defined in a module rather than in a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
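# Illustrative usage sketch (not part of the library): assumes a runhouse cluster is
# available and credentials are configured; the cluster spec mirrors the docstring
# example above. It demonstrates the `device` convention documented on the class:
# -1 runs inference on CPU, 0 targets the first CUDA device on the remote hardware.
if __name__ == "__main__":
    import runhouse as rh
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
    llm = SelfHostedHuggingFaceLLM(
        model_id="google/flan-t5-large",
        task="text2text-generation",
        hardware=gpu,
        device=0,  # use -1 to force CPU inference on the remote machine
    )
    print(llm.invoke("Say hello in French."))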
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/petals.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Petals(LLM):
"""Petals Bloom models.
To use, you should have the ``petals`` python package installed, and the
environment variable ``HUGGINGFACE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
            from langchain_community.llms import Petals
petals = Petals()
"""
client: Any = None
"""The client to use for the API calls."""
tokenizer: Any = None
"""The tokenizer to use for the API calls."""
model_name: str = "bigscience/bloom-petals"
"""The model to use."""
temperature: float = 0.7
"""What sampling temperature to use"""
max_new_tokens: int = 256
"""The maximum number of new tokens to generate in the completion."""
top_p: float = 0.9
"""The cumulative probability for top-p sampling."""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens
to keep for top-k-filtering."""
do_sample: bool = True
"""Whether or not to use sampling; use greedy decoding otherwise."""
max_length: Optional[int] = None
"""The maximum length of the sequence to be generated."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call
not explicitly specified."""
huggingface_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in get_fields(cls).values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingface_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "huggingface_api_key", "HUGGINGFACE_API_KEY")
)
try:
from petals import AutoDistributedModelForCausalLM
from transformers import AutoTokenizer
model_name = values["model_name"]
values["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
values["client"] = AutoDistributedModelForCausalLM.from_pretrained(
model_name
)
values["huggingface_api_key"] = huggingface_api_key.get_secret_value()
except ImportError:
raise ImportError(
"Could not import transformers or petals python package."
"Please install with `pip install -U transformers petals`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Petals API."""
normal_params = {
"temperature": self.temperature,
"max_new_tokens": self.max_new_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
"max_length": self.max_length,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "petals"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Petals API."""
params = self._default_params
params = {**params, **kwargs}
inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
outputs = self.client.generate(inputs, **params)
text = self.tokenizer.decode(outputs[0])
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
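# Illustrative sketch (assumes the `petals` and `transformers` packages are installed
# and HUGGINGFACE_API_KEY is set). Keyword arguments that are not declared fields,
# such as the hypothetical `num_beams` below, are moved into `model_kwargs` by the
# `build_extra` validator above and forwarded to `client.generate()`.
if __name__ == "__main__":
    llm = Petals(
        model_name="bigscience/bloom-petals",
        max_new_tokens=64,
        num_beams=1,  # not a declared field, so it lands in llm.model_kwargs
    )
    print(llm.model_kwargs)  # expected: {'num_beams': 1}
    print(llm.invoke("Once upon a time"))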
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/amazon_api_gateway.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import ConfigDict
from langchain_community.llms.utils import enforce_stop_tokens
class ContentHandlerAmazonAPIGateway:
"""Adapter to prepare the inputs from Langchain to a format
that LLM model expects.
It also provides helper function to extract
the generated text from the model response."""
@classmethod
def transform_input(
cls, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
return {"inputs": prompt, "parameters": model_kwargs}
@classmethod
def transform_output(cls, response: Any) -> str:
return response.json()[0]["generated_text"]
class AmazonAPIGateway(LLM):
"""Amazon API Gateway to access LLM models hosted on AWS."""
api_url: str
"""API Gateway URL"""
headers: Optional[Dict] = None
"""API Gateway HTTP Headers to send, e.g. for authentication"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model."""
content_handler: ContentHandlerAmazonAPIGateway = ContentHandlerAmazonAPIGateway()
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
model_config = ConfigDict(
extra="forbid",
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"api_url": self.api_url, "headers": self.headers},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_api_gateway"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Amazon API Gateway model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
payload = self.content_handler.transform_input(prompt, _model_kwargs)
try:
response = requests.post(
self.api_url,
headers=self.headers,
json=payload,
)
text = self.content_handler.transform_output(response)
except Exception as error:
raise ValueError(f"Error raised by the service: {error}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
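# Illustrative sketch: the adapter pattern described above lets the request payload
# and response parsing be swapped without touching the LLM class. The endpoint URL,
# API key header, and the {"generated_text": ...} response shape below are assumptions
# about a hypothetical deployment, not guarantees about any particular service.
class _ExampleContentHandler(ContentHandlerAmazonAPIGateway):
    @classmethod
    def transform_output(cls, response: Any) -> str:
        # Assume this endpoint returns {"generated_text": "..."} instead of a list.
        return response.json()["generated_text"]
if __name__ == "__main__":
    llm = AmazonAPIGateway(
        api_url="https://example.execute-api.us-east-1.amazonaws.com/prod/generate",
        headers={"x-api-key": "YOUR_API_KEY"},
        content_handler=_ExampleContentHandler(),
    )
    print(llm.invoke("Tell me a joke."))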
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/mlx_pipeline.py | from __future__ import annotations
import logging
from typing import Any, Callable, Iterator, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import ConfigDict
DEFAULT_MODEL_ID = "mlx-community/quantized-gemma-2b"
logger = logging.getLogger(__name__)
class MLXPipeline(LLM):
"""MLX Pipeline API.
To use, you should have the ``mlx-lm`` python package installed.
Example using from_model_id:
.. code-block:: python
from langchain_community.llms import MLXPipeline
pipe = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b",
pipeline_kwargs={"max_tokens": 10, "temp": 0.7},
)
Example passing model and tokenizer in directly:
.. code-block:: python
from langchain_community.llms import MLXPipeline
from mlx_lm import load
model_id="mlx-community/quantized-gemma-2b"
model, tokenizer = load(model_id)
pipe = MLXPipeline(model=model, tokenizer=tokenizer)
"""
model_id: str = DEFAULT_MODEL_ID
"""Model name to use."""
model: Any = None #: :meta private:
"""Model."""
tokenizer: Any = None #: :meta private:
"""Tokenizer."""
tokenizer_config: Optional[dict] = None
"""
Configuration parameters specifically for the tokenizer.
Defaults to an empty dictionary.
"""
adapter_file: Optional[str] = None
"""
Path to the adapter file. If provided, applies LoRA layers to the model.
Defaults to None.
"""
lazy: bool = False
"""
    If False, eval the model parameters to make sure they are
    loaded in memory before returning; otherwise they will be loaded
    when needed. Default: ``False``
"""
pipeline_kwargs: Optional[dict] = None
"""
Keyword arguments passed to the pipeline. Defaults include:
- temp (float): Temperature for generation, default is 0.0.
- max_tokens (int): Maximum tokens to generate, default is 100.
- verbose (bool): Whether to output verbose logging, default is False.
- formatter (Optional[Callable]): A callable to format the output.
Default is None.
- repetition_penalty (Optional[float]): The penalty factor for
repeated sequences, default is None.
- repetition_context_size (Optional[int]): Size of the context
for applying repetition penalty, default is None.
- top_p (float): The cumulative probability threshold for
top-p filtering, default is 1.0.
"""
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def from_model_id(
cls,
model_id: str,
tokenizer_config: Optional[dict] = None,
adapter_file: Optional[str] = None,
lazy: bool = False,
pipeline_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> MLXPipeline:
"""Construct the pipeline object from model_id and task."""
try:
from mlx_lm import load
except ImportError:
raise ImportError(
"Could not import mlx_lm python package. "
"Please install it with `pip install mlx_lm`."
)
tokenizer_config = tokenizer_config or {}
if adapter_file:
model, tokenizer = load(model_id, tokenizer_config, adapter_file, lazy)
else:
model, tokenizer = load(model_id, tokenizer_config, lazy=lazy)
_pipeline_kwargs = pipeline_kwargs or {}
return cls(
model_id=model_id,
model=model,
tokenizer=tokenizer,
tokenizer_config=tokenizer_config,
adapter_file=adapter_file,
lazy=lazy,
pipeline_kwargs=_pipeline_kwargs,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"tokenizer_config": self.tokenizer_config,
"adapter_file": self.adapter_file,
"lazy": self.lazy,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "mlx_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
try:
from mlx_lm import generate
except ImportError:
raise ImportError(
"Could not import mlx_lm python package. "
"Please install it with `pip install mlx_lm`."
)
pipeline_kwargs = kwargs.get("pipeline_kwargs", self.pipeline_kwargs)
temp: float = pipeline_kwargs.get("temp", 0.0)
max_tokens: int = pipeline_kwargs.get("max_tokens", 100)
verbose: bool = pipeline_kwargs.get("verbose", False)
formatter: Optional[Callable] = pipeline_kwargs.get("formatter", None)
repetition_penalty: Optional[float] = pipeline_kwargs.get(
"repetition_penalty", None
)
repetition_context_size: Optional[int] = pipeline_kwargs.get(
"repetition_context_size", None
)
top_p: float = pipeline_kwargs.get("top_p", 1.0)
return generate(
model=self.model,
tokenizer=self.tokenizer,
prompt=prompt,
temp=temp,
max_tokens=max_tokens,
verbose=verbose,
formatter=formatter,
repetition_penalty=repetition_penalty,
repetition_context_size=repetition_context_size,
top_p=top_p,
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
try:
import mlx.core as mx
from mlx_lm.utils import generate_step
except ImportError:
raise ImportError(
"Could not import mlx_lm python package. "
"Please install it with `pip install mlx_lm`."
)
pipeline_kwargs = kwargs.get("pipeline_kwargs", self.pipeline_kwargs)
temp: float = pipeline_kwargs.get("temp", 0.0)
max_new_tokens: int = pipeline_kwargs.get("max_tokens", 100)
repetition_penalty: Optional[float] = pipeline_kwargs.get(
"repetition_penalty", None
)
repetition_context_size: Optional[int] = pipeline_kwargs.get(
"repetition_context_size", None
)
top_p: float = pipeline_kwargs.get("top_p", 1.0)
prompt = self.tokenizer.encode(prompt, return_tensors="np")
prompt_tokens = mx.array(prompt[0])
eos_token_id = self.tokenizer.eos_token_id
detokenizer = self.tokenizer.detokenizer
detokenizer.reset()
for (token, prob), n in zip(
generate_step(
prompt=prompt_tokens,
model=self.model,
temp=temp,
repetition_penalty=repetition_penalty,
repetition_context_size=repetition_context_size,
top_p=top_p,
),
range(max_new_tokens),
):
# identify text to yield
text: Optional[str] = None
detokenizer.add_token(token)
detokenizer.finalize()
text = detokenizer.last_segment
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if token == eos_token_id or (stop is not None and text in stop):
break
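# Illustrative sketch (assumes `mlx_lm` is installed and the model can be downloaded
# from the Hugging Face Hub). Generation settings can be fixed at construction time or
# overridden per call via `pipeline_kwargs`, which is how `_call` and `_stream` read
# them above.
if __name__ == "__main__":
    llm = MLXPipeline.from_model_id(
        "mlx-community/quantized-gemma-2b",
        pipeline_kwargs={"max_tokens": 64, "temp": 0.7},
    )
    # Per-call override of the construction-time defaults:
    print(llm.invoke("Write a haiku about the sea.", pipeline_kwargs={"temp": 0.0}))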
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/llamacpp.py | from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, model_validator
logger = logging.getLogger(__name__)
class LlamaCpp(LLM):
"""llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/llama/model")
"""
client: Any = None #: :meta private:
model_path: str
"""The path to the Llama model file."""
lora_base: Optional[str] = None
"""The path to the Llama LoRA base model."""
lora_path: Optional[str] = None
"""The path to the Llama LoRA. If None, no LoRa is loaded."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(True, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
use_mmap: Optional[bool] = True
"""Whether to keep the model loaded in RAM"""
rope_freq_scale: float = 1.0
"""Scale factor for rope sampling."""
rope_freq_base: float = 10000.0
"""Base frequency for rope sampling."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Any additional parameters to pass to llama_cpp.Llama."""
streaming: bool = True
"""Whether to stream the results, token by token."""
grammar_path: Optional[Union[str, Path]] = None
"""
grammar_path: Path to the .gbnf file that defines formal grammars
for constraining model outputs. For instance, the grammar can be used
to force the model to generate valid JSON or to speak exclusively in emojis. At most
one of grammar_path and grammar should be passed in.
"""
grammar: Optional[Union[str, Any]] = None
"""
grammar: formal grammar for constraining model outputs. For instance, the grammar
can be used to force the model to generate valid JSON or to speak exclusively in
emojis. At most one of grammar_path and grammar should be passed in.
"""
verbose: bool = True
"""Print verbose output to stderr."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
try:
from llama_cpp import Llama, LlamaGrammar
except ImportError:
raise ImportError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
model_path = values["model_path"]
model_param_names = [
"rope_freq_scale",
"rope_freq_base",
"lora_path",
"lora_base",
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"use_mmap",
"last_n_tokens_size",
"verbose",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
model_params.update(values["model_kwargs"])
try:
values["client"] = Llama(model_path, **model_params)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
if values["grammar"] and values["grammar_path"]:
grammar = values["grammar"]
grammar_path = values["grammar_path"]
raise ValueError(
"Can only pass in one of grammar and grammar_path. Received "
f"{grammar=} and {grammar_path=}."
)
elif isinstance(values["grammar"], str):
values["grammar"] = LlamaGrammar.from_string(values["grammar"])
elif values["grammar_path"]:
values["grammar"] = LlamaGrammar.from_file(values["grammar_path"])
else:
pass
return values
@model_validator(mode="before")
@classmethod
def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling llama_cpp."""
params = {
"suffix": self.suffix,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
"logprobs": self.logprobs,
"echo": self.echo,
"stop_sequences": self.stop, # key here is convention among LLM classes
"repeat_penalty": self.repeat_penalty,
"top_k": self.top_k,
}
if self.grammar:
params["grammar"] = self.grammar
return params
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_path": self.model_path}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "llamacpp"
def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by llama_cpp.
Args:
stop (Optional[List[str]]): List of stop sequences for llama_cpp.
Returns:
Dictionary containing the combined parameters.
"""
# Raise error if stop sequences are in both input and default params
if self.stop and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
params = self._default_params
# llama_cpp expects the "stop" key not this, so we remove it:
params.pop("stop_sequences")
        # then set it as configured, or default to an empty list:
params["stop"] = self.stop or stop or []
return params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm.invoke("This is a prompt.")
"""
if self.streaming:
# If streaming is enabled, we use the stream
# method that yields as they are generated
            # and return the combined string from the first choice's text:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
**kwargs,
):
combined_text_output += chunk.text
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result["choices"][0]["text"]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
            Dictionary-like objects containing a string token and metadata.
See llama-cpp-python docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/path/to/local/model.bin",
temperature = 0.5
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","\n"]):
result = chunk["choices"][0]
print(result["text"], end='', flush=True) # noqa: T201
"""
params = {**self._get_parameters(stop), **kwargs}
result = self.client(prompt=prompt, stream=True, **params)
for part in result:
logprobs = part["choices"][0].get("logprobs", None)
chunk = GenerationChunk(
text=part["choices"][0]["text"],
generation_info={"logprobs": logprobs},
)
if run_manager:
run_manager.on_llm_new_token(
token=chunk.text, verbose=self.verbose, log_probs=logprobs
)
yield chunk
def get_num_tokens(self, text: str) -> int:
tokenized_text = self.client.tokenize(text.encode("utf-8"))
return len(tokenized_text)
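# Illustrative sketch (assumes `llama-cpp-python` is installed and a local GGUF model
# exists at the placeholder path below). It demonstrates the grammar constraint
# described above: a GBNF grammar string is parsed into a `LlamaGrammar` by
# `validate_environment`, and at most one of `grammar` and `grammar_path` may be set.
if __name__ == "__main__":
    yes_no_grammar = 'root ::= "yes" | "no"'
    llm = LlamaCpp(
        model_path="/path/to/local/model.gguf",  # placeholder path
        temperature=0.0,
        grammar=yes_no_grammar,
    )
    print(llm.invoke("Is the sky blue? Answer yes or no:"))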
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/cohere.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.load.serializable import Serializable
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, Field, SecretStr
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
def _create_retry_decorator(max_retries: int) -> Callable[[Any], Any]:
import cohere
# support v4 and v5
retry_conditions = (
retry_if_exception_type(cohere.error.CohereError)
if hasattr(cohere, "error")
else retry_if_exception_type(Exception)
)
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=retry_conditions,
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm.max_retries)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
def acompletion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm.max_retries)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return await llm.async_client.generate(**kwargs)
return _completion_with_retry(**kwargs)
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.BaseCohere"
)
class BaseCohere(Serializable):
"""Base class for Cohere models."""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model: Optional[str] = Field(default=None)
"""Model name to use."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
cohere_api_key: Optional[SecretStr] = None
"""Cohere API key. If not provided, will be read from the environment variable."""
stop: Optional[List[str]] = None
streaming: bool = Field(default=False)
"""Whether to stream the results."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
else:
values["cohere_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "cohere_api_key", "COHERE_API_KEY")
)
client_name = values["user_agent"]
values["client"] = cohere.Client(
api_key=values["cohere_api_key"].get_secret_value(),
client_name=client_name,
)
values["async_client"] = cohere.AsyncClient(
api_key=values["cohere_api_key"].get_secret_value(),
client_name=client_name,
)
return values
@deprecated(since="0.1.14", removal="1.0", alternative_import="langchain_cohere.Cohere")
class Cohere(LLM, BaseCohere):
"""Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
model_config = ConfigDict(
extra="forbid",
)
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def lc_secrets(self) -> Dict[str, str]:
return {"cohere_api_key": "COHERE_API_KEY"}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
return {**params, **kwargs}
def _process_response(self, response: Any, stop: Optional[List[str]]) -> str:
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = await cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = await acompletion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
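# Illustrative sketch (assumes the `cohere` package is installed and COHERE_API_KEY is
# set; the model name is only an example). It shows the stop-sequence handling above:
# stops passed at call time are sent as `stop_sequences` and stripped from the output.
if __name__ == "__main__":
    llm = Cohere(model="command", max_retries=3)
    print(llm.invoke("List three fruits, one per line:", stop=["\n\n"]))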
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/bananadev.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import (
secret_from_env,
)
from pydantic import ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Banana(LLM):
"""Banana large language models.
To use, you should have the ``banana-dev`` python package installed,
and the environment variable ``BANANA_API_KEY`` set with your API key.
This is the team API key available in the Banana dashboard.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import Banana
banana = Banana(model_key="", model_url_slug="")
"""
model_key: str = ""
"""model key to use"""
model_url_slug: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
banana_api_key: Optional[SecretStr] = Field(
default_factory=secret_from_env("BANANA_API_KEY", default=None)
)
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = set(list(cls.model_fields.keys()))
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_key": self.model_key},
**{"model_url_slug": self.model_url_slug},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "bananadev"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Banana endpoint."""
try:
from banana_dev import Client
except ImportError:
raise ImportError(
"Could not import banana-dev python package. "
"Please install it with `pip install banana-dev`."
)
params = self.model_kwargs or {}
params = {**params, **kwargs}
api_key = cast(SecretStr, self.banana_api_key)
model_key = self.model_key
model_url_slug = self.model_url_slug
model_inputs = {
# a json specific to your model.
"prompt": prompt,
**params,
}
model = Client(
# Found in main dashboard
api_key=api_key.get_secret_value(),
# Both found in model details page
model_key=model_key,
url=f"https://{model_url_slug}.run.banana.dev",
)
response, meta = model.call("/", model_inputs)
try:
text = response["outputs"]
except (KeyError, TypeError):
raise ValueError(
"Response should be of schema: {'outputs': 'text'}."
"\nTo fix this:"
"\n- fork the source repo of the Banana model"
"\n- modify app.py to return the above schema"
"\n- deploy that as a custom repo"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
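# Illustrative sketch (assumes `banana-dev` is installed, BANANA_API_KEY is set, and a
# model whose app returns the {"outputs": "..."} schema described above is deployed).
# The model key and URL slug are placeholders; extra keyword arguments such as
# `max_new_tokens` are routed into `model_kwargs` by the `build_extra` validator.
if __name__ == "__main__":
    llm = Banana(
        model_key="YOUR_MODEL_KEY",
        model_url_slug="your-model-slug",
        max_new_tokens=128,
    )
    print(llm.invoke("Tell me a joke."))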
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/octoai_endpoint.py | from typing import Any, Dict
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.llms.openai import BaseOpenAI
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "codellama-7b-instruct"
class OctoAIEndpoint(BaseOpenAI): # type: ignore[override]
"""OctoAI LLM Endpoints - OpenAI compatible.
OctoAIEndpoint is a class to interact with OctoAI Compute Service large
language model endpoints.
To use, you should have the environment variable ``OCTOAI_API_TOKEN`` set
with your API token, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
llm = OctoAIEndpoint(
model="llama-2-13b-chat-fp16",
max_tokens=200,
presence_penalty=0,
temperature=0.1,
top_p=0.9,
)
"""
"""Key word arguments to pass to the model."""
octoai_api_base: str = Field(default=DEFAULT_BASE_URL)
octoai_api_token: SecretStr = Field(default=SecretStr(""))
model_name: str = Field(default=DEFAULT_MODEL)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
}
if not is_openai_v1():
params.update(
{
"api_key": self.octoai_api_token.get_secret_value(),
"api_base": self.octoai_api_base,
}
)
return {**params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "octoai_endpoint"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["octoai_api_base"] = get_from_dict_or_env(
values,
"octoai_api_base",
"OCTOAI_API_BASE",
default=DEFAULT_BASE_URL,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model_name"] = get_from_dict_or_env(
values,
"model_name",
"MODEL_NAME",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["octoai_api_base"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).completions
else:
values["openai_api_base"] = values["octoai_api_base"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.Completion # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if "endpoint_url" in values["model_kwargs"]:
raise ValueError(
"`endpoint_url` was deprecated, please use `octoai_api_base`."
)
return values
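# Illustrative sketch (assumes the `openai` package is installed and OCTOAI_API_TOKEN
# is set). The model name mirrors the docstring example; `octoai_api_base` defaults to
# the OpenAI-compatible endpoint defined above.
if __name__ == "__main__":
    llm = OctoAIEndpoint(
        model_name="llama-2-13b-chat-fp16",
        max_tokens=200,
        temperature=0.1,
    )
    print(llm.invoke("What is the capital of France?"))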
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/google_palm.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, SecretStr
from langchain_community.llms import BaseLLM
from langchain_community.utilities.vertexai import create_retry_decorator
def completion_with_retry(
llm: GooglePalm,
prompt: LanguageModelInput,
is_gemini: bool = False,
stream: bool = False,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = create_retry_decorator(
llm, max_retries=llm.max_retries, run_manager=run_manager
)
@retry_decorator
def _completion_with_retry(
prompt: LanguageModelInput, is_gemini: bool, stream: bool, **kwargs: Any
) -> Any:
generation_config = kwargs.get("generation_config", {})
if is_gemini:
return llm.client.generate_content(
contents=prompt, stream=stream, generation_config=generation_config
)
return llm.client.generate_text(prompt=prompt, **kwargs)
return _completion_with_retry(
prompt=prompt, is_gemini=is_gemini, stream=stream, **kwargs
)
def _is_gemini_model(model_name: str) -> bool:
return "gemini" in model_name
def _strip_erroneous_leading_spaces(text: str) -> str:
"""Strip erroneous leading spaces from text.
The PaLM API will sometimes erroneously return a single leading space in all
lines > 1. This function strips that space.
"""
has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:])
if has_leading_space:
return text.replace("\n ", "\n")
else:
return text
@deprecated("0.0.12", alternative_import="langchain_google_genai.GoogleGenerativeAI")
class GooglePalm(BaseLLM, BaseModel):
"""
DEPRECATED: Use `langchain_google_genai.GoogleGenerativeAI` instead.
Google PaLM models.
"""
client: Any #: :meta private:
google_api_key: Optional[SecretStr]
model_name: str = "models/text-bison-001"
"""Model name to use."""
temperature: float = 0.7
"""Run inference with this temperature. Must be in the closed interval
[0.0, 1.0]."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
max_output_tokens: Optional[int] = None
"""Maximum number of tokens to include in a candidate. Must be greater than zero.
If unset, will default to 64."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
max_retries: int = 6
"""The maximum number of retries to make when generating."""
@property
def is_gemini(self) -> bool:
"""Returns whether a model is belongs to a Gemini family or not."""
return _is_gemini_model(self.model_name)
@property
def lc_secrets(self) -> Dict[str, str]:
return {"google_api_key": "GOOGLE_API_KEY"}
@classmethod
    def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "google_palm"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists."""
google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
model_name = values["model_name"]
try:
import google.generativeai as genai
if isinstance(google_api_key, SecretStr):
google_api_key = google_api_key.get_secret_value()
genai.configure(api_key=google_api_key)
if _is_gemini_model(model_name):
values["client"] = genai.GenerativeModel(model_name=model_name)
else:
values["client"] = genai
except ImportError:
raise ImportError(
"Could not import google-generativeai python package. "
"Please install it with `pip install google-generativeai`."
)
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0:
raise ValueError("max_output_tokens must be greater than zero")
return values
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations: List[List[Generation]] = []
generation_config = {
"stop_sequences": stop,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"max_output_tokens": self.max_output_tokens,
"candidate_count": self.n,
}
for prompt in prompts:
if self.is_gemini:
res = completion_with_retry(
self,
prompt=prompt,
stream=False,
is_gemini=True,
run_manager=run_manager,
generation_config=generation_config,
)
candidates = [
"".join([p.text for p in c.content.parts]) for c in res.candidates
]
generations.append([Generation(text=c) for c in candidates])
else:
res = completion_with_retry(
self,
model=self.model_name,
prompt=prompt,
stream=False,
is_gemini=False,
run_manager=run_manager,
**generation_config,
)
prompt_generations = []
for candidate in res.candidates:
raw_text = candidate["output"]
stripped_text = _strip_erroneous_leading_spaces(raw_text)
prompt_generations.append(Generation(text=stripped_text))
generations.append(prompt_generations)
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
generation_config = kwargs.get("generation_config", {})
if stop:
generation_config["stop_sequences"] = stop
for stream_resp in completion_with_retry(
self,
prompt,
stream=True,
is_gemini=True,
run_manager=run_manager,
generation_config=generation_config,
**kwargs,
):
chunk = GenerationChunk(text=stream_resp.text)
if run_manager:
run_manager.on_llm_new_token(
stream_resp.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "google_palm"
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
if self.is_gemini:
raise ValueError("Counting tokens is not yet supported!")
result = self.client.count_text_tokens(model=self.model_name, prompt=text)
return result["token_count"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/baseten.py | import logging
import os
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import Field
logger = logging.getLogger(__name__)
class Baseten(LLM):
"""Baseten model
This module allows using LLMs hosted on Baseten.
The LLM deployed on Baseten must have the following properties:
* Must accept input as a dictionary with the key "prompt"
* May accept other input in the dictionary passed through with kwargs
* Must return a string with the model output
To use this module, you must:
* Export your Baseten API key as the environment variable `BASETEN_API_KEY`
* Get the model ID for your model from your Baseten dashboard
* Identify the model deployment ("production" for all model library models)
These code samples use
[Mistral 7B Instruct](https://app.baseten.co/explore/mistral_7b_instruct)
from Baseten's model library.
Examples:
.. code-block:: python
from langchain_community.llms import Baseten
# Production deployment
mistral = Baseten(model="MODEL_ID", deployment="production")
mistral("What is the Mistral wind?")
.. code-block:: python
from langchain_community.llms import Baseten
# Development deployment
mistral = Baseten(model="MODEL_ID", deployment="development")
mistral("What is the Mistral wind?")
.. code-block:: python
from langchain_community.llms import Baseten
# Other published deployment
mistral = Baseten(model="MODEL_ID", deployment="DEPLOYMENT_ID")
mistral("What is the Mistral wind?")
"""
model: str
deployment: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
baseten_api_key = os.environ["BASETEN_API_KEY"]
model_id = self.model
if self.deployment == "production":
model_url = f"https://model-{model_id}.api.baseten.co/production/predict"
elif self.deployment == "development":
model_url = f"https://model-{model_id}.api.baseten.co/development/predict"
else: # try specific deployment ID
model_url = f"https://model-{model_id}.api.baseten.co/deployment/{self.deployment}/predict"
response = requests.post(
model_url,
headers={"Authorization": f"Api-Key {baseten_api_key}"},
json={"prompt": prompt, **kwargs},
)
return response.json()
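# Illustrative sketch (assumes BASETEN_API_KEY is set and "MODEL_ID" is replaced with a
# real Baseten model id). It shows how the `deployment` field above selects the
# prediction URL: "production" and "development" map to fixed routes, and any other
# value is treated as a specific deployment id.
if __name__ == "__main__":
    llm = Baseten(model="MODEL_ID", deployment="production")
    print(llm.invoke("What is the Mistral wind?"))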
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/forefrontai.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import ConfigDict, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
class ForefrontAI(LLM):
"""ForefrontAI large language models.
To use, you should have the environment variable ``FOREFRONTAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain_community.llms import ForefrontAI
forefrontai = ForefrontAI(endpoint_url="")
"""
endpoint_url: str = ""
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
length: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: float = 1.0
"""Total probability mass of tokens to consider at each step."""
top_k: int = 40
"""The number of highest probability vocabulary tokens to
keep for top-k-filtering."""
repetition_penalty: int = 1
"""Penalizes repeated tokens according to frequency."""
forefrontai_api_key: SecretStr
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
values["forefrontai_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "forefrontai_api_key", "FOREFRONTAI_API_KEY")
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling ForefrontAI API."""
return {
"temperature": self.temperature,
"length": self.length,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"endpoint_url": self.endpoint_url}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "forefrontai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to ForefrontAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ForefrontAI("Tell me a joke.")
"""
auth_value = f"Bearer {self.forefrontai_api_key.get_secret_value()}"
response = requests.post(
url=self.endpoint_url,
headers={
"Authorization": auth_value,
"Content-Type": "application/json",
},
json={"text": prompt, **self._default_params, **kwargs},
)
response_json = response.json()
text = response_json["result"][0]["completion"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
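# Illustrative sketch (assumes FOREFRONTAI_API_KEY is set and the placeholder endpoint
# URL is replaced with the URL of a deployed ForefrontAI model). The request body sent
# by `_call` is {"text": prompt, **default_params}, with a bearer token header.
if __name__ == "__main__":
    llm = ForefrontAI(
        endpoint_url="https://YOUR_FOREFRONT_ENDPOINT_URL",
        temperature=0.7,
        length=128,
    )
    print(llm.invoke("Tell me a joke."))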
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/oci_generative_ai.py | from __future__ import annotations
import json
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.llms.utils import enforce_stop_tokens
CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"
class Provider(ABC):
@property
@abstractmethod
def stop_sequence_key(self) -> str: ...
@abstractmethod
def completion_response_to_text(self, response: Any) -> str: ...
class CohereProvider(Provider):
stop_sequence_key: str = "stop_sequences"
def __init__(self) -> None:
from oci.generative_ai_inference import models
self.llm_inference_request = models.CohereLlmInferenceRequest
def completion_response_to_text(self, response: Any) -> str:
return response.data.inference_response.generated_texts[0].text
class MetaProvider(Provider):
stop_sequence_key: str = "stop"
def __init__(self) -> None:
from oci.generative_ai_inference import models
self.llm_inference_request = models.LlamaLlmInferenceRequest
def completion_response_to_text(self, response: Any) -> str:
return response.data.inference_response.choices[0].text
class OCIAuthType(Enum):
"""OCI authentication types as enumerator."""
API_KEY = 1
SECURITY_TOKEN = 2
INSTANCE_PRINCIPAL = 3
RESOURCE_PRINCIPAL = 4
class OCIGenAIBase(BaseModel, ABC):
"""Base class for OCI GenAI models"""
client: Any = Field(default=None, exclude=True) #: :meta private:
auth_type: Optional[str] = "API_KEY"
"""Authentication type, could be
API_KEY,
SECURITY_TOKEN,
INSTANCE_PRINCIPAL,
RESOURCE_PRINCIPAL
If not specified, API_KEY will be used
"""
auth_profile: Optional[str] = "DEFAULT"
"""The name of the profile in ~/.oci/config
If not specified , DEFAULT will be used
"""
model_id: Optional[str] = None
"""Id of the model to call, e.g., cohere.command"""
provider: Optional[str] = None
"""Provider name of the model. Default to None,
will try to be derived from the model_id
otherwise, requires user input
"""
model_kwargs: Optional[Dict] = None
"""Keyword arguments to pass to the model"""
service_endpoint: Optional[str] = None
"""service endpoint url"""
compartment_id: Optional[str] = None
"""OCID of compartment"""
is_stream: bool = False
"""Whether to stream back partial progress"""
model_config = ConfigDict(
extra="forbid", arbitrary_types_allowed=True, protected_namespaces=()
)
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that OCI config and python package exists in environment."""
# Skip creating new client if passed in constructor
if values["client"] is not None:
return values
try:
import oci
client_kwargs = {
"config": {},
"signer": None,
"service_endpoint": values["service_endpoint"],
"retry_strategy": oci.retry.DEFAULT_RETRY_STRATEGY,
"timeout": (10, 240), # default timeout config for OCI Gen AI service
}
if values["auth_type"] == OCIAuthType(1).name:
client_kwargs["config"] = oci.config.from_file(
profile_name=values["auth_profile"]
)
client_kwargs.pop("signer", None)
elif values["auth_type"] == OCIAuthType(2).name:
def make_security_token_signer(oci_config): # type: ignore[no-untyped-def]
pk = oci.signer.load_private_key_from_file(
oci_config.get("key_file"), None
)
with open(
oci_config.get("security_token_file"), encoding="utf-8"
) as f:
st_string = f.read()
return oci.auth.signers.SecurityTokenSigner(st_string, pk)
client_kwargs["config"] = oci.config.from_file(
profile_name=values["auth_profile"]
)
client_kwargs["signer"] = make_security_token_signer(
oci_config=client_kwargs["config"]
)
elif values["auth_type"] == OCIAuthType(3).name:
client_kwargs["signer"] = (
oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
)
elif values["auth_type"] == OCIAuthType(4).name:
client_kwargs["signer"] = (
oci.auth.signers.get_resource_principals_signer()
)
else:
raise ValueError(
"Please provide valid value to auth_type, "
f"{values['auth_type']} is not valid."
)
values["client"] = oci.generative_ai_inference.GenerativeAiInferenceClient(
**client_kwargs
)
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
except Exception as e:
raise ValueError(
"""Could not authenticate with OCI client.
Please check if ~/.oci/config exists.
If INSTANCE_PRINCIPAL or RESOURCE_PRINCIPAL is used,
please check the specified
auth_profile and auth_type are valid.""",
e,
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
def _get_provider(self, provider_map: Mapping[str, Any]) -> Any:
if self.provider is not None:
provider = self.provider
else:
if self.model_id is None:
raise ValueError(
"model_id is required to derive the provider, "
"please provide the provider explicitly or specify "
"the model_id to derive the provider."
)
provider = self.model_id.split(".")[0].lower()
if provider not in provider_map:
raise ValueError(
f"Invalid provider derived from model_id: {self.model_id} "
"Please explicitly pass in the supported provider "
"when using custom endpoint"
)
return provider_map[provider]
class OCIGenAI(LLM, OCIGenAIBase):
"""OCI large language models.
To authenticate, the OCI client uses the methods described in
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm
    The authentication method is passed through auth_type and should be one of:
API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL
Make sure you have the required policies (profile/roles) to
access the OCI Generative AI service.
If a specific config profile is used, you must pass
the name of the profile (from ~/.oci/config) through auth_profile.
To use, you must provide the compartment id
along with the endpoint url, and model id
as named parameters to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import OCIGenAI
llm = OCIGenAI(
model_id="MY_MODEL_ID",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID"
)
"""
model_config = ConfigDict(
extra="forbid",
arbitrary_types_allowed=True,
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_generative_ai_completion"
@property
def _provider_map(self) -> Mapping[str, Any]:
"""Get the provider map"""
return {
"cohere": CohereProvider(),
"meta": MetaProvider(),
}
@property
def _provider(self) -> Any:
"""Get the internal provider object"""
return self._get_provider(provider_map=self._provider_map)
def _prepare_invocation_object(
self, prompt: str, stop: Optional[List[str]], kwargs: Dict[str, Any]
) -> Dict[str, Any]:
from oci.generative_ai_inference import models
_model_kwargs = self.model_kwargs or {}
if stop is not None:
_model_kwargs[self._provider.stop_sequence_key] = stop
if self.model_id is None:
raise ValueError(
"model_id is required to call the model, "
"please provide the model_id."
)
if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
serving_mode = models.DedicatedServingMode(endpoint_id=self.model_id)
else:
serving_mode = models.OnDemandServingMode(model_id=self.model_id)
inference_params = {**_model_kwargs, **kwargs}
inference_params["prompt"] = prompt
inference_params["is_stream"] = self.is_stream
invocation_obj = models.GenerateTextDetails(
compartment_id=self.compartment_id,
serving_mode=serving_mode,
inference_request=self._provider.llm_inference_request(**inference_params),
)
return invocation_obj
def _process_response(self, response: Any, stop: Optional[List[str]]) -> str:
text = self._provider.completion_response_to_text(response)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OCIGenAI generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm.invoke("Tell me a joke.")
"""
if self.is_stream:
text = ""
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
text += chunk.text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
invocation_obj = self._prepare_invocation_object(prompt, stop, kwargs)
response = self.client.generate_text(invocation_obj)
return self._process_response(response, stop)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Stream OCIGenAI LLM on given prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
An iterator of GenerationChunks.
Example:
.. code-block:: python
response = llm.stream("Tell me a joke.")
"""
self.is_stream = True
invocation_obj = self._prepare_invocation_object(prompt, stop, kwargs)
response = self.client.generate_text(invocation_obj)
for event in response.data.events():
json_load = json.loads(event.data)
if "text" in json_load:
event_data_text = json_load["text"]
else:
event_data_text = ""
chunk = GenerationChunk(text=event_data_text)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
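# Hedged usage sketch (illustrative, not part of the library): how the OCIGenAI
# class above might be used for a one-shot completion and for token streaming.
# The model_id, service_endpoint and compartment_id values are placeholders, and
# valid OCI credentials in ~/.oci/config (or another supported auth_type) plus
# the oci package are assumed.
if __name__ == "__main__":
    llm = OCIGenAI(
        model_id="cohere.command",  # placeholder model id
        service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
        compartment_id="ocid1.compartment.oc1..example",  # placeholder OCID
        model_kwargs={"max_tokens": 100, "temperature": 0.1},  # assumed valid kwargs
    )
    # One-shot completion.
    print(llm.invoke("Tell me a joke."))
    # Token-by-token streaming; each item yielded by .stream() is a text chunk.
    for chunk in llm.stream("Tell me a joke."):
        print(chunk, end="", flush=True)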
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/aviary.py | import dataclasses
import os
from typing import Any, Dict, List, Mapping, Optional, Union, cast
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
TIMEOUT = 60
@dataclasses.dataclass
class AviaryBackend:
"""Aviary backend.
Attributes:
backend_url: The URL for the Aviary backend.
bearer: The bearer token for the Aviary backend.
"""
backend_url: str
bearer: str
def __post_init__(self) -> None:
self.header = {"Authorization": self.bearer}
@classmethod
def from_env(cls) -> "AviaryBackend":
aviary_url = os.getenv("AVIARY_URL")
assert aviary_url, "AVIARY_URL must be set"
aviary_token = os.getenv("AVIARY_TOKEN", "")
bearer = f"Bearer {aviary_token}" if aviary_token else ""
aviary_url += "/" if not aviary_url.endswith("/") else ""
return cls(aviary_url, bearer)
def get_models() -> List[str]:
"""List available models"""
backend = AviaryBackend.from_env()
request_url = backend.backend_url + "-/routes"
response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT)
try:
result = response.json()
except requests.JSONDecodeError as e:
raise RuntimeError(
f"Error decoding JSON from {request_url}. Text response: {response.text}"
) from e
result = sorted(
[k.lstrip("/").replace("--", "/") for k in result.keys() if "--" in k]
)
return result
def get_completions(
model: str,
prompt: str,
use_prompt_format: bool = True,
version: str = "",
) -> Dict[str, Union[str, float, int]]:
"""Get completions from Aviary models."""
backend = AviaryBackend.from_env()
url = backend.backend_url + model.replace("/", "--") + "/" + version + "query"
response = requests.post(
url,
headers=backend.header,
json={"prompt": prompt, "use_prompt_format": use_prompt_format},
timeout=TIMEOUT,
)
try:
return response.json()
except requests.JSONDecodeError as e:
raise RuntimeError(
f"Error decoding JSON from {url}. Text response: {response.text}"
) from e
class Aviary(LLM):
"""Aviary hosted models.
Aviary is a backend for hosted models. You can
find out more about aviary at
http://github.com/ray-project/aviary
To get a list of the models supported on an
aviary, follow the instructions on the website to
install the aviary CLI and then use:
`aviary models`
AVIARY_URL and AVIARY_TOKEN environment variables must be set.
Attributes:
model: The name of the model to use. Defaults to "amazon/LightGPT".
aviary_url: The URL for the Aviary backend. Defaults to None.
aviary_token: The bearer token for the Aviary backend. Defaults to None.
use_prompt_format: If True, the prompt template for the model will be ignored.
Defaults to True.
version: API version to use for Aviary. Defaults to None.
Example:
.. code-block:: python
from langchain_community.llms import Aviary
os.environ["AVIARY_URL"] = "<URL>"
os.environ["AVIARY_TOKEN"] = "<TOKEN>"
light = Aviary(model='amazon/LightGPT')
output = light('How do you make fried rice?')
"""
model: str = "amazon/LightGPT"
aviary_url: Optional[str] = None
aviary_token: Optional[str] = None
# If True the prompt template for the model will be ignored.
use_prompt_format: bool = True
# API version to use for Aviary
version: Optional[str] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
aviary_url = get_from_dict_or_env(values, "aviary_url", "AVIARY_URL")
aviary_token = get_from_dict_or_env(values, "aviary_token", "AVIARY_TOKEN")
        # Set env variables for the aviary sdk
os.environ["AVIARY_URL"] = aviary_url
os.environ["AVIARY_TOKEN"] = aviary_token
try:
aviary_models = get_models()
except requests.exceptions.RequestException as e:
raise ValueError(e)
model = values.get("model")
if model and model not in aviary_models:
raise ValueError(f"{aviary_url} does not support model {values['model']}.")
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model,
"aviary_url": self.aviary_url,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return f"aviary-{self.model.replace('/', '-')}"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aviary
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aviary("Tell me a joke.")
"""
kwargs = {"use_prompt_format": self.use_prompt_format}
if self.version:
kwargs["version"] = self.version
output = get_completions(
model=self.model,
prompt=prompt,
**kwargs,
)
text = cast(str, output["generated_text"])
if stop:
text = enforce_stop_tokens(text, stop)
return text
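# Hedged usage sketch (illustrative, not part of the library): minimal end-to-end
# use of the Aviary wrapper defined above. The URL, token and model name are
# placeholders; a reachable Aviary deployment behind AVIARY_URL is assumed.
if __name__ == "__main__":
    os.environ["AVIARY_URL"] = "http://localhost:8000"  # placeholder URL
    os.environ["AVIARY_TOKEN"] = "my-token"  # placeholder token
    print(get_models())  # list the models exposed by the backend
    llm = Aviary(model="amazon/LightGPT")
    print(llm.invoke("How do you make fried rice?"))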
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/javelin_ai_gateway.py | from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from pydantic import BaseModel
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Params(BaseModel, extra="allow"): # type: ignore[call-arg]
"""Parameters for the Javelin AI Gateway LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class JavelinAIGateway(LLM):
"""Javelin AI Gateway LLMs.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain_community.llms import JavelinAIGateway
completions = JavelinAIGateway(
gateway_uri="<your-javelin-ai-gateway-uri>",
route="<your-javelin-ai-gateway-completions-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
client: Optional[Any] = None
"""The Javelin AI Gateway client."""
gateway_uri: Optional[str] = None
"""The URI of the Javelin AI Gateway API."""
params: Optional[Params] = None
"""Parameters for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"route": self.route,
"javelin_api_key": self.javelin_api_key,
**(self.params.dict() if self.params else {}),
}
return params
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return self._default_params
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = self.client.query_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call async the Javelin AI Gateway API."""
data: Dict[str, Any] = {
"prompt": prompt,
**(self.params.dict() if self.params else {}),
}
if s := (stop or (self.params.stop if self.params else None)):
data["stop"] = s
if self.client is not None:
resp = await self.client.aquery_route(self.route, query_body=data)
else:
raise ValueError("Javelin client is not initialized.")
resp_dict = resp.dict()
try:
return resp_dict["llm_response"]["choices"][0]["text"]
except KeyError:
return ""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "javelin-ai-gateway"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/exllamav2.py | from typing import Any, Callable, Dict, Iterator, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import pre_init
from pydantic import Field
class ExLlamaV2(LLM):
"""ExllamaV2 API.
- working only with GPTQ models for now.
- Lora models are not supported yet.
To use, you should have the exllamav2 library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out:
Example:
.. code-block:: python
from langchain_community.llms import Exllamav2
llm = Exllamav2(model_path="/path/to/llama/model")
#TODO:
- Add loras support
- Add support for custom settings
- Add support for custom stop sequences
"""
client: Any = None
model_path: str
exllama_cache: Any = None
config: Any = None
generator: Any = None
tokenizer: Any = None
# If settings is None, it will be used as the default settings for the model.
# All other parameters won't be used.
settings: Any = None
# Langchain parameters
logfunc: Callable = print
stop_sequences: List[str] = Field([])
"""Sequences that immediately will stop the generator."""
max_new_tokens: int = Field(150)
"""Maximum number of tokens to generate."""
streaming: bool = Field(True)
"""Whether to stream the results, token by token."""
verbose: bool = Field(True)
"""Whether to print debug information."""
# Generator parameters
disallowed_tokens: Optional[List[int]] = Field(None)
"""List of tokens to disallow during generation."""
@pre_init
def validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]:
try:
import torch
except ImportError as e:
raise ImportError(
"Unable to import torch, please install with `pip install torch`."
) from e
# check if cuda is available
if not torch.cuda.is_available():
raise EnvironmentError("CUDA is not available. ExllamaV2 requires CUDA.")
try:
from exllamav2 import (
ExLlamaV2,
ExLlamaV2Cache,
ExLlamaV2Config,
ExLlamaV2Tokenizer,
)
from exllamav2.generator import (
ExLlamaV2BaseGenerator,
ExLlamaV2StreamingGenerator,
)
except ImportError:
raise ImportError(
"Could not import exllamav2 library. "
"Please install the exllamav2 library with (cuda 12.1 is required)"
"example : "
"!python -m pip install https://github.com/turboderp/exllamav2/releases/download/v0.0.12/exllamav2-0.0.12+cu121-cp311-cp311-linux_x86_64.whl"
)
# Set logging function if verbose or set to empty lambda
verbose = values["verbose"]
if not verbose:
values["logfunc"] = lambda *args, **kwargs: None
logfunc = values["logfunc"]
if values["settings"]:
settings = values["settings"]
logfunc(settings.__dict__)
else:
raise NotImplementedError(
"settings is required. Custom settings are not supported yet."
)
config = ExLlamaV2Config()
config.model_dir = values["model_path"]
config.prepare()
model = ExLlamaV2(config)
exllama_cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(exllama_cache)
tokenizer = ExLlamaV2Tokenizer(config)
if values["streaming"]:
generator = ExLlamaV2StreamingGenerator(model, exllama_cache, tokenizer)
else:
generator = ExLlamaV2BaseGenerator(model, exllama_cache, tokenizer)
# Configure the model and generator
values["stop_sequences"] = [x.strip().lower() for x in values["stop_sequences"]]
setattr(settings, "stop_sequences", values["stop_sequences"])
logfunc(f"stop_sequences {values['stop_sequences']}")
disallowed = values.get("disallowed_tokens")
if disallowed:
settings.disallow_tokens(tokenizer, disallowed)
values["client"] = model
values["generator"] = generator
values["config"] = config
values["tokenizer"] = tokenizer
values["exllama_cache"] = exllama_cache
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ExLlamaV2"
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
return self.generator.tokenizer.num_tokens(text)
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
generator = self.generator
if self.streaming:
combined_text_output = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, kwargs=kwargs
):
combined_text_output += str(chunk)
return combined_text_output
else:
output = generator.generate_simple(
prompt=prompt,
gen_settings=self.settings,
num_tokens=self.max_new_tokens,
)
            # strip the echoed prompt from the generated output
output = output[len(prompt) :]
return output
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
input_ids = self.tokenizer.encode(prompt)
self.generator.warmup()
self.generator.set_stop_conditions([])
self.generator.begin_stream(input_ids, self.settings)
generated_tokens = 0
while True:
chunk, eos, _ = self.generator.stream()
generated_tokens += 1
if run_manager:
run_manager.on_llm_new_token(
token=chunk,
verbose=self.verbose,
)
yield chunk
if eos or generated_tokens == self.max_new_tokens:
break
return
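# Hedged usage sketch (illustrative, not part of the library): loading a local GPTQ
# model with the ExLlamaV2 wrapper above. The model path is a placeholder, a CUDA
# GPU is required, and it is assumed that exllamav2 exposes ExLlamaV2Sampler.Settings
# for the mandatory `settings` argument (custom settings are otherwise unsupported).
if __name__ == "__main__":
    from exllamav2.generator import ExLlamaV2Sampler  # assumed import path
    sampler_settings = ExLlamaV2Sampler.Settings()  # default sampling settings
    llm = ExLlamaV2(
        model_path="/path/to/gptq/model",  # placeholder path
        settings=sampler_settings,
        max_new_tokens=150,
        streaming=True,
        stop_sequences=["</s>"],
    )
    print(llm.invoke("Explain GPTQ quantization in one sentence."))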
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/cerebriumai.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class CerebriumAI(LLM):
"""CerebriumAI large language models.
To use, you should have the ``cerebrium`` python package installed.
You should also have the environment variable ``CEREBRIUMAI_API_KEY``
set with your API key or pass it as a named argument in the constructor.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import CerebriumAI
cerebrium = CerebriumAI(endpoint_url="", cerebriumai_api_key="my-api-key")
"""
endpoint_url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
cerebriumai_api_key: Optional[SecretStr] = None
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = set(list(cls.model_fields.keys()))
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cerebriumai_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY")
)
values["cerebriumai_api_key"] = cerebriumai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cerebriumai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
headers: Dict = {
"Authorization": cast(
SecretStr, self.cerebriumai_api_key
).get_secret_value(),
"Content-Type": "application/json",
}
params = self.model_kwargs or {}
payload = {"prompt": prompt, **params, **kwargs}
response = requests.post(self.endpoint_url, json=payload, headers=headers)
if response.status_code == 200:
data = response.json()
text = data["result"]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
else:
response.raise_for_status()
return ""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/fake.py | import asyncio
import time
from typing import Any, AsyncIterator, Iterator, List, Mapping, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.llms import LLM
from langchain_core.runnables import RunnableConfig
class FakeListLLM(LLM):
"""Fake LLM for testing purposes."""
responses: List[str]
sleep: Optional[float] = None
i: int = 0
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response"""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response"""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
class FakeStreamingListLLM(FakeListLLM):
"""Fake streaming list LLM for testing purposes."""
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
result = self.invoke(input, config)
for c in result:
if self.sleep is not None:
time.sleep(self.sleep)
yield c
async def astream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
result = await self.ainvoke(input, config)
for c in result:
if self.sleep is not None:
await asyncio.sleep(self.sleep)
yield c
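# Hedged usage sketch (illustrative, not part of the library): FakeListLLM cycles
# through its canned responses, which makes deterministic unit tests possible
# without network calls; FakeStreamingListLLM additionally yields the chosen
# response one character at a time.
if __name__ == "__main__":
    llm = FakeListLLM(responses=["first answer", "second answer"])
    assert llm.invoke("q1") == "first answer"
    assert llm.invoke("q2") == "second answer"
    assert llm.invoke("q3") == "first answer"  # index wraps back to the start
    streaming_llm = FakeStreamingListLLM(responses=["abc"])
    assert list(streaming_llm.stream("q")) == ["a", "b", "c"]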
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/aphrodite.py | from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import pre_init
from pydantic import Field
class Aphrodite(BaseLLM):
"""Aphrodite language model."""
model: str = ""
"""The name or path of a HuggingFace Transformers model."""
tensor_parallel_size: Optional[int] = 1
"""The number of GPUs to use for distributed execution with tensor parallelism."""
trust_remote_code: Optional[bool] = False
"""Trust remote code (e.g., from HuggingFace) when downloading the model
and tokenizer."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
best_of: Optional[int] = None
"""Number of output sequences that are generated from the prompt.
From these `best_of` sequences, the top `n` sequences are returned.
`best_of` must be >= `n`. This is treated as the beam width when
`use_beam_search` is True. By default, `best_of` is set to `n`."""
presence_penalty: float = 0.0
"""Float that penalizes new tokens based on whether they appear in the
generated text so far. Values > 0 encourage the model to generate new
tokens, while values < 0 encourage the model to repeat tokens."""
frequency_penalty: float = 0.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far. Applied additively to the logits."""
repetition_penalty: float = 1.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far. Applied multiplicatively to the logits."""
temperature: float = 1.0
"""Float that controls the randomness of the sampling. Lower values
make the model more deterministic, while higher values make the model
more random. Zero is equivalent to greedy sampling."""
top_p: float = 1.0
"""Float that controls the cumulative probability of the top tokens to consider.
Must be in (0, 1]. Set to 1.0 to consider all tokens."""
top_k: int = -1
"""Integer that controls the number of top tokens to consider. Set to -1 to
consider all tokens (disabled)."""
top_a: float = 0.0
"""Float that controls the cutoff for Top-A sampling. Exact cutoff is
top_a*max_prob**2. Must be in [0,inf], 0 to disable."""
min_p: float = 0.0
"""Float that controls the cutoff for min-p sampling. Exact cutoff is
min_p*max_prob. Must be in [0,1], 0 to disable."""
tfs: float = 1.0
"""Float that controls the cumulative approximate curvature of the
distribution to retain for Tail Free Sampling. Must be in (0, 1].
Set to 1.0 to disable."""
eta_cutoff: float = 0.0
"""Float that controls the cutoff threshold for Eta sampling
(a form of entropy adaptive truncation sampling). Threshold is
    calculated as `min(eta, sqrt(eta)*entropy(probs))`. Specified
in units of 1e-4. Set to 0 to disable."""
epsilon_cutoff: float = 0.0
"""Float that controls the cutoff threshold for Epsilon sampling
(simple probability threshold truncation). Specified in units of
1e-4. Set to 0 to disable."""
typical_p: float = 1.0
"""Float that controls the cumulative probability of tokens closest
in surprise to the expected surprise to consider. Must be in (0, 1].
Set to 1 to disable."""
mirostat_mode: int = 0
"""The mirostat mode to use. 0 for no mirostat, 2 for mirostat v2.
Mode 1 is not supported."""
mirostat_tau: float = 0.0
"""The target 'surprisal' that mirostat works towards. Range [0, inf)."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
length_penalty: float = 1.0
"""Float that penalizes sequences based on their length. Used only
when `use_beam_search` is True."""
early_stopping: bool = False
"""Controls the stopping condition for beam search. It accepts the
following values: `True`, where the generation stops as soon as there
    are `best_of` complete candidates; `False`, where a heuristic is applied
    and the generation stops when it is very unlikely to find better candidates;
    `never`, where the beam search procedure only stops when there cannot be
better candidates (canonical beam search algorithm)."""
stop: Optional[List[str]] = None
"""List of strings that stop the generation when they are generated.
The returned output will not contain the stop tokens."""
stop_token_ids: Optional[List[int]] = None
"""List of tokens that stop the generation when they are generated.
The returned output will contain the stop tokens unless the stop tokens
are special tokens."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
max_tokens: int = 512
"""Maximum number of tokens to generate per output sequence."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
prompt_logprobs: Optional[int] = None
"""Number of log probabilities to return per prompt token."""
custom_token_bans: Optional[List[int]] = None
"""List of token IDs to ban from generating."""
skip_special_tokens: bool = True
"""Whether to skip special tokens in the output. Defaults to True."""
spaces_between_special_tokens: bool = True
"""Whether to add spaces between special tokens in the output.
Defaults to True."""
logit_bias: Optional[Dict[str, float]] = None
"""List of LogitsProcessors to change the probability of token
prediction at runtime."""
dtype: str = "auto"
"""The data type for the model weights and activations."""
download_dir: Optional[str] = None
"""Directory to download and load the weights. (Default to the default
cache dir of huggingface)"""
quantization: Optional[str] = None
"""Quantization mode to use. Can be one of `awq` or `gptq`."""
aphrodite_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `aphrodite.LLM` call not explicitly
specified."""
client: Any = None #: :meta private:
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from aphrodite import LLM as AphroditeModel
except ImportError:
raise ImportError(
"Could not import aphrodite-engine python package. "
"Please install it with `pip install aphrodite-engine`."
)
# aphrodite_kwargs = values["aphrodite_kwargs"]
# if values.get("quantization"):
# aphrodite_kwargs["quantization"] = values["quantization"]
values["client"] = AphroditeModel(
model=values["model"],
tensor_parallel_size=values["tensor_parallel_size"],
trust_remote_code=values["trust_remote_code"],
dtype=values["dtype"],
download_dir=values["download_dir"],
**values["aphrodite_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling aphrodite."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"top_a": self.top_a,
"min_p": self.min_p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"repetition_penalty": self.repetition_penalty,
"tfs": self.tfs,
"eta_cutoff": self.eta_cutoff,
"epsilon_cutoff": self.epsilon_cutoff,
"typical_p": self.typical_p,
"mirostat_mode": self.mirostat_mode,
"mirostat_tau": self.mirostat_tau,
"length_penalty": self.length_penalty,
"early_stopping": self.early_stopping,
"use_beam_search": self.use_beam_search,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"logprobs": self.logprobs,
"prompt_logprobs": self.prompt_logprobs,
"custom_token_bans": self.custom_token_bans,
"skip_special_tokens": self.skip_special_tokens,
"spaces_between_special_tokens": self.spaces_between_special_tokens,
"logit_bias": self.logit_bias,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
from aphrodite import SamplingParams
# build sampling parameters
params = {**self._default_params, **kwargs, "stop": stop}
if "logit_bias" in params:
del params["logit_bias"]
sampling_params = SamplingParams(**params)
# call the model
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aphrodite"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/tongyi.py | from __future__ import annotations
import asyncio
import functools
import logging
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import Field
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
T = TypeVar("T")
def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
    # Wait 2^x * 1 second between retries, starting at 1 second
    # and capped at 4 seconds, for up to llm.max_retries attempts
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def check_response(resp: Any) -> Any:
"""Check the response from the completion call."""
if resp["status_code"] == 200:
return resp
elif resp["status_code"] in [400, 401]:
raise ValueError(
f"status_code: {resp['status_code']} \n "
f"code: {resp['code']} \n message: {resp['message']}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp['status_code']} \n "
f"code: {resp['code']} \n message: {resp['message']}",
response=resp,
)
def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _generate_with_retry(**_kwargs: Any) -> Any:
resp = llm.client.call(**_kwargs)
return check_response(resp)
return _generate_with_retry(**kwargs)
def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _stream_generate_with_retry(**_kwargs: Any) -> Any:
responses = llm.client.call(**_kwargs)
for resp in responses:
yield check_response(resp)
return _stream_generate_with_retry(**kwargs)
async def astream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Async version of `stream_generate_with_retry`.
Because the dashscope SDK doesn't provide an async API,
we wrap `stream_generate_with_retry` with an async generator."""
class _AioTongyiGenerator:
def __init__(self, _llm: Tongyi, **_kwargs: Any):
self.generator = stream_generate_with_retry(_llm, **_kwargs)
def __aiter__(self) -> AsyncIterator[Any]:
return self
async def __anext__(self) -> Any:
value = await asyncio.get_running_loop().run_in_executor(
None, self._safe_next
)
if value is not None:
return value
else:
raise StopAsyncIteration
def _safe_next(self) -> Any:
try:
return next(self.generator)
except StopIteration:
return None
async for chunk in _AioTongyiGenerator(llm, **kwargs):
yield chunk
def generate_with_last_element_mark(iterable: Iterable[T]) -> Iterator[Tuple[T, bool]]:
"""Generate elements from an iterable,
and a boolean indicating if it is the last element."""
iterator = iter(iterable)
try:
item = next(iterator)
except StopIteration:
return
for next_item in iterator:
yield item, False
item = next_item
yield item, True
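# For example, generate_with_last_element_mark(["a", "b", "c"]) yields
# ("a", False), ("b", False), ("c", True); the streaming code below uses the
# final flag to attach full generation_info only to the last chunk.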
async def agenerate_with_last_element_mark(
iterable: AsyncIterable[T],
) -> AsyncIterator[Tuple[T, bool]]:
"""Generate elements from an async iterable,
and a boolean indicating if it is the last element."""
iterator = iterable.__aiter__()
try:
item = await iterator.__anext__()
except StopAsyncIteration:
return
async for next_item in iterator:
yield item, False
item = next_item
yield item, True
class Tongyi(BaseLLM):
"""Tongyi completion model integration.
Setup:
Install ``dashscope`` and set environment variables ``DASHSCOPE_API_KEY``.
.. code-block:: bash
pip install dashscope
export DASHSCOPE_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Tongyi model to use.
top_p: float
Total probability mass of tokens to consider at each step.
streaming: bool
Whether to stream the results or not.
Key init args — client params:
api_key: Optional[str]
            Dashscope API KEY. If not passed in, it will be read from env var DASHSCOPE_API_KEY.
max_retries: int
Maximum number of retries to make when generating.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_community.llms import Tongyi
llm = Tongyi(
model="qwen-max",
# top_p="...",
# api_key="...",
# other params...
)
Invoke:
.. code-block:: python
input_text = "用50个字左右阐述,生命的意义在于"
llm.invoke(input_text)
.. code-block:: python
'探索、成长、连接与爱——在有限的时间里,不断学习、体验、贡献并寻找与世界和谐共存之道,让每一刻充满价值与意义。'
Stream:
.. code-block:: python
for chunk in llm.stream(input_text):
print(chunk)
.. code-block:: python
探索 | 、 | 成长 | 、连接与爱。 | 在有限的时间里,寻找个人价值, | 贡献于他人,共同体验世界的美好 | ,让世界因自己的存在而更 | 温暖。
Async:
.. code-block:: python
await llm.ainvoke(input_text)
# stream:
# async for chunk in llm.astream(input_text):
# print(chunk)
# batch:
# await llm.abatch([input_text])
.. code-block:: python
'探索、成长、连接与爱。在有限的时间里,寻找个人价值,贡献于他人和社会,体验丰富多彩的情感与经历,不断学习进步,让世界因自己的存在而更美好。'
""" # noqa: E501
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
client: Any = None #: :meta private:
model_name: str = Field(default="qwen-plus", alias="model")
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Dashscope api key provide by Alibaba Cloud."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["dashscope_api_key"] = get_from_dict_or_env(
values, ["dashscope_api_key", "api_key"], "DASHSCOPE_API_KEY"
)
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Tongyi Qwen API."""
normal_params = {
"model": self.model_name,
"top_p": self.top_p,
"api_key": self.dashscope_api_key,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"model_name": self.model_name, **super()._identifying_params}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations = []
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generations.append([self._chunk_to_generation(generation)])
else:
params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs)
for prompt in prompts:
completion = generate_with_retry(self, prompt=prompt, **params)
generations.append(
[Generation(**self._generation_from_qwen_resp(completion))]
)
return LLMResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations = []
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generations.append([self._chunk_to_generation(generation)])
else:
params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs)
for prompt in prompts:
completion = await asyncio.get_running_loop().run_in_executor(
None,
functools.partial(
generate_with_retry, **{"llm": self, "prompt": prompt, **params}
),
)
generations.append(
[Generation(**self._generation_from_qwen_resp(completion))]
)
return LLMResult(
generations=generations,
llm_output={
"model_name": self.model_name,
},
)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
stop=stop, stream=True, **kwargs
)
for stream_resp, is_last_chunk in generate_with_last_element_mark(
stream_generate_with_retry(self, prompt=prompt, **params)
):
chunk = GenerationChunk(
**self._generation_from_qwen_resp(stream_resp, is_last_chunk)
)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params: Dict[str, Any] = self._invocation_params(
stop=stop, stream=True, **kwargs
)
async for stream_resp, is_last_chunk in agenerate_with_last_element_mark(
astream_generate_with_retry(self, prompt=prompt, **params)
):
chunk = GenerationChunk(
**self._generation_from_qwen_resp(stream_resp, is_last_chunk)
)
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
yield chunk
def _invocation_params(self, stop: Any, **kwargs: Any) -> Dict[str, Any]:
params = {
**self._default_params,
**kwargs,
}
if stop is not None:
params["stop"] = stop
if params.get("stream"):
params["incremental_output"] = True
return params
@staticmethod
def _generation_from_qwen_resp(
resp: Any, is_last_chunk: bool = True
) -> Dict[str, Any]:
# According to the response from dashscope,
# each chunk's `generation_info` overwrites the previous one.
        # Besides, the `merge_dicts` method,
# which is used to concatenate `generation_info` in `GenerationChunk`,
# does not support merging of int type values.
# Therefore, we adopt the `generation_info` of the last chunk
# and discard the `generation_info` of the intermediate chunks.
if is_last_chunk:
return dict(
text=resp["output"]["text"],
generation_info=dict(
finish_reason=resp["output"]["finish_reason"],
request_id=resp["request_id"],
token_usage=dict(resp["usage"]),
),
)
else:
return dict(text=resp["output"]["text"])
@staticmethod
def _chunk_to_generation(chunk: GenerationChunk) -> Generation:
return Generation(
text=chunk.text,
generation_info=chunk.generation_info,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/openai.py | from __future__ import annotations
import logging
import os
import sys
import warnings
from typing import (
AbstractSet,
Any,
AsyncIterator,
Awaitable,
Callable,
Collection,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from langchain_core.utils.pydantic import get_fields
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import ConfigDict, Field, model_validator
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["choices"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
),
)
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
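# A plausible way these helpers compose (sketch, not taken from this file): start
# from _streaming_response_template(), fold each streamed chunk dict in with
# _update_response(), and accumulate usage from a response dict that has a
# "usage" key via update_token_usage(). For example:
#     response = _streaming_response_template()
#     for stream_resp in raw_stream:  # hypothetical iterable of chunk dicts
#         _update_response(response, stream_resp)
#     update_token_usage({"total_tokens"}, final_resp, token_usage)  # hypothetical names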
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout, # type: ignore[attr-defined]
openai.error.APIError, # type: ignore[attr-defined]
openai.error.APIConnectionError, # type: ignore[attr-defined]
openai.error.RateLimitError, # type: ignore[attr-defined]
openai.error.ServiceUnavailableError, # type: ignore[attr-defined]
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def completion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return llm.client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
if is_openai_v1():
return await llm.async_client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "openai"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.openai_api_base:
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization:
attributes["openai_organization"] = self.openai_organization
if self.openai_proxy:
attributes["openai_proxy"] = self.openai_proxy
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
return True
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo-instruct", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) # type: ignore[arg-type]
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if (
model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4")
) and "-instruct" not in model_name:
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain_community.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(**client_params).completions
elif not values.get("client"):
values["client"] = openai.Completion # type: ignore[attr-defined]
else:
pass
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params: Dict[str, Any] = {
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
if self.max_tokens is not None:
normal_params["max_tokens"] = self.max_tokens
if self.request_timeout is not None and not is_openai_v1():
normal_params["request_timeout"] = self.request_timeout
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
yield chunk
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
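# Prompts are sent in batches of ``batch_size``; streaming only supports a single
# prompt per batch.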
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
# V1 client returns the response in a Pydantic object instead of a
# dict. For the transition period, we deep-convert it to a dict.
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
if not system_fingerprint:
system_fingerprint = response.get("system_fingerprint")
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
def get_sub_prompts(
self,
params: Dict[str, Any],
prompts: List[str],
stop: Optional[List[str]] = None,
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
def create_llm_result(
self,
choices: Any,
prompts: List[str],
params: Dict[str, Any],
token_usage: Dict[str, int],
*,
system_fingerprint: Optional[str] = None,
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
n = params.get("n", self.n)
for i, _ in enumerate(prompts):
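# The API returns n completions per prompt in a single flat list, so slice out
# the n choices that belong to prompt i.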
sub_choices = choices[i * n : (i + 1) * n]
generations.append(
[
Generation(
text=choice["text"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
for choice in sub_choices
]
)
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
if system_fingerprint:
llm_output["system_fingerprint"] = system_fingerprint
return LLMResult(generations=generations, llm_output=llm_output)
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {}
if not is_openai_v1():
openai_creds.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
)
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined]
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
@staticmethod
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
"""
model_token_mapping = {
"gpt-4o": 128_000,
"gpt-4o-2024-05-13": 128_000,
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-3.5-turbo-instruct": 4096,
"text-ada-001": 2049,
"ada": 2049,
"text-babbage-001": 2040,
"babbage": 2049,
"text-curie-001": 2049,
"curie": 2049,
"davinci": 2049,
"text-davinci-003": 4097,
"text-davinci-002": 4097,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
# handling finetuned models
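# Legacy fine-tuned names look like "<base>:ft-...", so keep only the base model
# name for the context-size lookup.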
if "ft-" in modelname:
modelname = modelname.split(":")[0]
context_size = model_token_mapping.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
)
return context_size
@property
def max_context_size(self) -> int:
"""Get max context size for this model."""
return self.modelname_to_contextsize(self.model_name)
def max_tokens_for_prompt(self, prompt: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The maximum number of tokens to generate for a prompt.
Example:
.. code-block:: python
max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
"""
num_tokens = self.get_num_tokens(prompt)
return self.max_context_size - num_tokens
@deprecated(
since="0.0.10", removal="1.0", alternative_import="langchain_openai.OpenAI"
)
class OpenAI(BaseOpenAI):
"""OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import OpenAI
openai = OpenAI(model_name="gpt-3.5-turbo-instruct")
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "openai"]
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
@deprecated(
since="0.0.10", removal="1.0", alternative_import="langchain_openai.AzureOpenAI"
)
class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import AzureOpenAI
openai = AzureOpenAI(model_name="gpt-3.5-turbo-instruct")
"""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str = Field(default="", alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every sync request. For async requests,
will be invoked if `azure_ad_async_token_provider` is not provided.
"""
azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every async request.
"""
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "openai"]
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
)
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
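# If a deployment name was also supplied alongside the legacy base URL, fold it
# into the URL path and clear it so the v1 client targets the right deployment.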
if values["deployment_name"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment_name"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` "
"(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment_name"]
)
values["deployment_name"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).completions
azure_ad_async_token_provider = values["azure_ad_async_token_provider"]
if azure_ad_async_token_provider:
client_params["azure_ad_token_provider"] = azure_ad_async_token_provider
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params
).completions
else:
values["client"] = openai.Completion # type: ignore[attr-defined]
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
if is_openai_v1():
openai_params = {"model": self.deployment_name}
else:
openai_params = {
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
@deprecated(
since="0.0.1",
removal="1.0",
alternative_import="langchain_openai.ChatOpenAI",
)
class OpenAIChat(BaseLLM):
"""OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in get_fields(cls).values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base # type: ignore[attr-defined]
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined] # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion # type: ignore[attr-defined]
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain_community.chat_models import ChatOpenAI`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _get_chat_params(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
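# Any configured prefix_messages (e.g. a system message) are prepended before the
# single user prompt.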
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
yield chunk
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
enc = tiktoken.encoding_for_model(self.model_name)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/llms/ollama.py | from __future__ import annotations
import json
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Tuple,
Union,
)
import aiohttp
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import GenerationChunk, LLMResult
from pydantic import ConfigDict
def _stream_response_to_generation_chunk(
stream_response: str,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
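# Ollama marks the final streamed chunk with "done": true; only that chunk carries
# the response metadata, so surface it as generation_info.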
generation_info = parsed_response if parsed_response.get("done") is True else None
return GenerationChunk(
text=parsed_response.get("response", ""), generation_info=generation_info
)
class OllamaEndpointNotFoundError(Exception):
"""Raised when the Ollama endpoint is not found."""
class _OllamaCommon(BaseLanguageModel):
base_url: str = "http://localhost:11434"
"""Base url the model is hosted under."""
model: str = "llama2"
"""Model name to use."""
mirostat: Optional[int] = None
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float] = None
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float] = None
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int] = None
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int] = None
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int] = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
num_predict: Optional[int] = None
"""Maximum number of tokens to predict when generating text.
(Default: 128, -1 = infinite generation, -2 = fill context)"""
repeat_last_n: Optional[int] = None
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float] = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float] = None
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]] = None
"""Sets the stop tokens to use."""
tfs_z: Optional[float] = None
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int] = None
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
top_p: Optional[float] = None
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
system: Optional[str] = None
"""system prompt (overrides what is defined in the Modelfile)"""
template: Optional[str] = None
"""full prompt or prompt template (overrides what is defined in the Modelfile)"""
format: Optional[str] = None
"""Specify the format of the output (e.g., json)"""
timeout: Optional[int] = None
"""Timeout for the request stream"""
keep_alive: Optional[Union[int, str]] = None
"""How long the model will stay loaded into memory.
The parameter (Default: 5 minutes) can be set to:
1. a duration string in Golang (such as "10m" or "24h");
2. a number in seconds (such as 3600);
3. any negative number which will keep the model loaded \
in memory (e.g. -1 or "-1m");
4. 0 which will unload the model immediately after generating a response;
See the [Ollama documents](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-keep-a-model-loaded-in-memory-or-make-it-unload-immediately)"""
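# For example (values taken from the description above):
#   Ollama(model="llama2", keep_alive="10m")  # stay loaded for ten minutes
#   Ollama(model="llama2", keep_alive=0)      # unload right after the response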
raw: Optional[bool] = None
"""raw or not."""
headers: Optional[dict] = None
"""Additional headers to pass to endpoint (e.g. Authorization, Referer).
This is useful when Ollama is hosted on cloud services that require
tokens for authentication.
"""
auth: Union[Callable, Tuple, None] = None
"""Additional auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
Expects the same format, type and values as requests.request auth parameter."""
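# Illustrative sketch only -- the endpoint and token below are placeholders:
#   Ollama(base_url="https://ollama.example.com",
#          headers={"Authorization": "Bearer <token>"})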
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"format": self.format,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"num_predict": self.num_predict,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
"system": self.system,
"template": self.template,
"keep_alive": self.keep_alive,
"raw": self.raw,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model, "format": self.format}, **self._default_params}
def _create_generate_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
images: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
payload = {"prompt": prompt, "images": images}
yield from self._create_stream(
payload=payload,
stop=stop,
api_url=f"{self.base_url}/api/generate",
**kwargs,
)
async def _acreate_generate_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
images: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
payload = {"prompt": prompt, "images": images}
async for item in self._acreate_stream(
payload=payload,
stop=stop,
api_url=f"{self.base_url}/api/generate",
**kwargs,
):
yield item
def _create_stream(
self,
api_url: str,
payload: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
params = self._default_params
for key in self._default_params:
if key in kwargs:
params[key] = kwargs[key]
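# An explicit "options" dict in kwargs wins outright; otherwise merge the stop
# words and any non-default kwargs into the options payload.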
if "options" in kwargs:
params["options"] = kwargs["options"]
else:
params["options"] = {
**params["options"],
"stop": stop,
**{k: v for k, v in kwargs.items() if k not in self._default_params},
}
if payload.get("messages"):
request_payload = {"messages": payload.get("messages", []), **params}
else:
request_payload = {
"prompt": payload.get("prompt"),
"images": payload.get("images", []),
**params,
}
response = requests.post(
url=api_url,
headers={
"Content-Type": "application/json",
**(self.headers if isinstance(self.headers, dict) else {}),
},
auth=self.auth,
json=request_payload,
stream=True,
timeout=self.timeout,
)
response.encoding = "utf-8"
if response.status_code != 200:
if response.status_code == 404:
raise OllamaEndpointNotFoundError(
"Ollama call failed with status code 404. "
"Maybe your model is not found "
f"and you should pull the model with `ollama pull {self.model}`."
)
else:
optional_detail = response.text
raise ValueError(
f"Ollama call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
return response.iter_lines(decode_unicode=True)
async def _acreate_stream(
self,
api_url: str,
payload: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
params = self._default_params
for key in self._default_params:
if key in kwargs:
params[key] = kwargs[key]
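# Same option-merging rules as the synchronous _create_stream above.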
if "options" in kwargs:
params["options"] = kwargs["options"]
else:
params["options"] = {
**params["options"],
"stop": stop,
**{k: v for k, v in kwargs.items() if k not in self._default_params},
}
if payload.get("messages"):
request_payload = {"messages": payload.get("messages", []), **params}
else:
request_payload = {
"prompt": payload.get("prompt"),
"images": payload.get("images", []),
**params,
}
async with aiohttp.ClientSession() as session:
async with session.post(
url=api_url,
headers={
"Content-Type": "application/json",
**(self.headers if isinstance(self.headers, dict) else {}),
},
auth=self.auth, # type: ignore[arg-type]
json=request_payload,
timeout=self.timeout, # type: ignore[arg-type]
) as response:
if response.status != 200:
if response.status == 404:
raise OllamaEndpointNotFoundError(
"Ollama call failed with status code 404."
)
else:
optional_detail = response.text
raise ValueError(
f"Ollama call failed with status code {response.status}."
f" Details: {optional_detail}"
)
async for line in response.content:
yield line.decode("utf-8")
def _stream_with_aggregation(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> GenerationChunk:
final_chunk: Optional[GenerationChunk] = None
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
async def _astream_with_aggregation(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> GenerationChunk:
final_chunk: Optional[GenerationChunk] = None
async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
@deprecated(
since="0.3.1",
removal="1.0.0",
alternative_import="langchain_ollama.OllamaLLM",
)
class Ollama(BaseLLM, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain_community.llms import Ollama
ollama = Ollama(model="llama2")
"""
model_config = ConfigDict(
extra="forbid",
)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ollama-llm"
def _generate( # type: ignore[override]
self,
prompts: List[str],
stop: Optional[List[str]] = None,
images: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Ollama's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ollama("Tell me a joke.")
"""
# TODO: add caching here.
generations = []
for prompt in prompts:
final_chunk = super()._stream_with_aggregation(
prompt,
stop=stop,
images=images,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations) # type: ignore[arg-type]
async def _agenerate( # type: ignore[override]
self,
prompts: List[str],
stop: Optional[List[str]] = None,
images: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Ollama's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ollama("Tell me a joke.")
"""
# TODO: add caching here.
generations = []
for prompt in prompts:
final_chunk = await super()._astream_with_aggregation(
prompt,
stop=stop,
images=images,
run_manager=run_manager, # type: ignore[arg-type]
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations) # type: ignore[arg-type]
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
|