File size: 5,392 Bytes
22dcdfd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 |
from typing import Any, Literal, NotRequired
from pydantic import BaseModel, Field, SerializeAsAny
from typing_extensions import TypedDict
from schema.models import AllModelEnum, AnthropicModelName, OpenAIModelName
class AgentInfo(BaseModel):
    """Describes one agent the service exposes: its routing key and a summary."""

    # Unique key clients use to select this agent.
    key: str = Field(
        examples=["research-assistant"],
        description="Agent key.",
    )
    # Human-readable summary of what the agent does.
    description: str = Field(
        examples=["A research assistant for generating research papers."],
        description="Description of the agent.",
    )
class ServiceMetadata(BaseModel):
    """Top-level service descriptor: hosted agents, usable models, and defaults."""

    # Every agent registered with this service.
    agents: list[AgentInfo] = Field(description="List of available agents.")
    # Every LLM the service can route requests to.
    models: list[AllModelEnum] = Field(description="List of available LLMs.")
    # Fallback agent key applied when a request names no agent.
    default_agent: str = Field(
        examples=["research-assistant"],
        description="Default agent used when none is specified.",
    )
    # Fallback model applied when a request names no model.
    default_model: AllModelEnum = Field(description="Default model used when none is specified.")
class UserInput(BaseModel):
    """Basic user input for the agent.

    Carries the user's message plus optional routing/persistence metadata:
    which model to run, and thread/user identifiers for multi-turn state.
    """

    # The user's message text — the only required field.
    message: str = Field(
        description="User input to the agent.",
        examples=["What is the weather in Tokyo?"],
    )
    # None means "use the service's configured default model".
    model: SerializeAsAny[AllModelEnum] | None = Field(
        title="Model",
        description="LLM Model to use for the agent. Defaults to the default model set in the settings of the service.",
        default=None,
        examples=[OpenAIModelName.GPT_5_NANO, AnthropicModelName.HAIKU_45],
    )
    # Identifies the conversation thread; None starts/implies a fresh context.
    thread_id: str | None = Field(
        description="Thread ID to persist and continue a multi-turn conversation.",
        default=None,
        examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
    )
    # Identifies the user across threads; None means anonymous/unpersisted.
    user_id: str | None = Field(
        description="User ID to persist and continue a conversation across multiple threads.",
        default=None,
        examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
    )
    # default_factory=dict (not default={}) is the pydantic idiom for mutable
    # defaults: each instance is guaranteed its own empty dict.
    agent_config: dict[str, Any] = Field(
        description="Additional configuration to pass through to the agent",
        default_factory=dict,
        examples=[{"spicy_level": 0.8}],
    )
class StreamInput(UserInput):
    """UserInput variant for streaming endpoints, adding a token-streaming toggle."""

    # When True (the default), individual LLM tokens are streamed to the client.
    stream_tokens: bool = Field(
        default=True,
        description="Whether to stream LLM tokens to the client.",
    )
class ToolCall(TypedDict):
    """Represents a request to call a tool."""

    name: str
    """The name of the tool to be called."""
    args: dict[str, Any]
    """The arguments to the tool call."""
    id: str | None
    """An identifier associated with the tool call."""
    # Optional discriminator key; when present it is always the literal "tool_call".
    type: NotRequired[Literal["tool_call"]]
class ChatMessage(BaseModel):
    """Message in a chat.

    A role-tagged message (human/ai/tool/custom) with optional tool-call
    payloads, correlation IDs, and metadata attached by the backend.
    """

    # Role of the sender; "custom" carries arbitrary data in custom_data.
    type: Literal["human", "ai", "tool", "custom"] = Field(
        description="Role of the message.",
        examples=["human", "ai", "tool", "custom"],
    )
    # Plain-text body of the message.
    content: str = Field(
        description="Content of the message.",
        examples=["Hello, world!"],
    )
    # default_factory (not default=[]) is the pydantic idiom for mutable
    # defaults: every instance gets its own list.
    tool_calls: list[ToolCall] = Field(
        description="Tool calls in the message.",
        default_factory=list,
    )
    # Set on "tool" messages to link the result back to the originating call.
    tool_call_id: str | None = Field(
        description="Tool call that this message is responding to.",
        default=None,
        examples=["call_Jja7J89XsjrOLA5r!MEOW!SL"],
    )
    # Correlates the message with the run that produced it.
    run_id: str | None = Field(
        description="Run ID of the message.",
        default=None,
        examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
    )
    response_metadata: dict[str, Any] = Field(
        description="Response metadata. For example: response headers, logprobs, token counts.",
        default_factory=dict,
    )
    custom_data: dict[str, Any] = Field(
        description="Custom message data.",
        default_factory=dict,
    )

    def pretty_repr(self) -> str:
        """Get a pretty representation of the message.

        Returns the content under a title line padded with '=' to 80 columns,
        e.g. ``==== Human Message ====``. The extra '=' compensates for odd
        title widths so the rule always totals 80 characters.
        """
        base_title = self.type.title() + " Message"
        padded = " " + base_title + " "
        sep_len = (80 - len(padded)) // 2
        sep = "=" * sep_len
        second_sep = sep + "=" if len(padded) % 2 else sep
        title = f"{sep}{padded}{second_sep}"
        return f"{title}\n\n{self.content}"

    def pretty_print(self) -> None:
        """Print the pretty representation to stdout."""
        print(self.pretty_repr())  # noqa: T201
class Feedback(BaseModel):  # type: ignore[no-redef]
    """Feedback for a run, to record to LangSmith."""

    # The run being scored.
    run_id: str = Field(
        description="Run ID to record feedback for.",
        examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
    )
    # Name of the feedback metric (LangSmith feedback key).
    key: str = Field(
        description="Feedback key.",
        examples=["human-feedback-stars"],
    )
    # Numeric score for the metric.
    score: float = Field(
        description="Feedback score.",
        examples=[0.8],
    )
    # default_factory=dict (not default={}) is the pydantic idiom for mutable
    # defaults: each instance receives its own dict.
    kwargs: dict[str, Any] = Field(
        description="Additional feedback kwargs, passed to LangSmith.",
        default_factory=dict,
        examples=[{"comment": "In-line human feedback"}],
    )
class FeedbackResponse(BaseModel):
    """Acknowledgement returned once feedback has been recorded."""

    # Constant marker; the endpoint raises on failure rather than reporting it here.
    status: Literal["success"] = "success"
class ChatHistoryInput(BaseModel):
    """Request payload for fetching the messages of an existing thread."""

    # Selects which conversation thread's history to return.
    thread_id: str = Field(
        examples=["847c6285-8fc9-4560-a83f-4e6285809254"],
        description="Thread ID to persist and continue a multi-turn conversation.",
    )
class ChatHistory(BaseModel):
    """Ordered list of messages belonging to one conversation thread."""

    messages: list[ChatMessage]
|