File size: 9,257 Bytes
77830e0
 
cd70b5d
77830e0
f3518c5
5cb387e
f3518c5
 
 
7db4283
f3518c5
f2fc66c
f3518c5
 
5cb387e
f2fc66c
5cb387e
f3518c5
 
 
4f6a1a5
f3518c5
 
77830e0
f3518c5
77830e0
f8272eb
5cb387e
 
 
 
f3518c5
f8272eb
 
 
 
 
 
 
f3518c5
 
4f6a1a5
f3518c5
 
 
 
5cb387e
f3518c5
 
77830e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f3518c5
4f6a1a5
f3518c5
 
 
 
5cb387e
 
f3518c5
 
cd70b5d
 
f3518c5
 
cd70b5d
f3518c5
cd70b5d
 
 
 
 
 
f3518c5
cd70b5d
77830e0
 
cd70b5d
 
 
77830e0
 
 
 
 
f3518c5
 
cd70b5d
 
f3518c5
 
cd70b5d
f3518c5
cd70b5d
 
 
f3518c5
 
 
4f6a1a5
f3518c5
 
 
 
 
cd70b5d
 
 
 
70ff909
93d79eb
cd70b5d
 
 
 
 
 
 
 
f3518c5
7db4283
2ad2310
93d79eb
cd70b5d
 
 
 
77830e0
 
 
 
 
ff54322
77830e0
 
ff54322
 
 
 
5cb387e
 
 
 
 
 
 
 
 
77830e0
 
 
 
 
 
 
 
 
 
 
 
 
ff54322
 
77830e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff54322
 
77830e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff54322
5cb387e
 
77830e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5cb387e
77830e0
 
 
 
 
 
 
5cb387e
77830e0
 
5cb387e
 
77830e0
5cb387e
77830e0
 
 
 
f234f8e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77830e0
 
 
 
f234f8e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
from __future__ import annotations

from datetime import datetime
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field, model_validator


class ContentItem(BaseModel):
    """One piece of message content: text, image, audio, or file payload."""

    # Discriminator selecting which of the optional payload fields below applies.
    type: Literal["text", "image_url", "file", "input_audio"]
    text: str | None = None
    image_url: dict[str, str] | None = None
    input_audio: dict[str, Any] | None = None
    file: dict[str, str] | None = None
    # OpenAI-style annotation objects attached to this content item.
    annotations: list[dict[str, Any]] = Field(default_factory=list)


class Message(BaseModel):
    """A single chat message exchanged between client and model."""

    role: str
    # Content may be a plain string, a list of structured items, or absent
    # (e.g. for tool-call-only assistant messages).
    content: str | list[ContentItem] | None = None
    name: str | None = None
    tool_calls: list["ToolCall"] | None = None
    tool_call_id: str | None = None
    refusal: str | None = None
    reasoning_content: str | None = None
    audio: dict[str, Any] | None = None
    annotations: list[dict[str, Any]] = Field(default_factory=list)

    @model_validator(mode="after")
    def normalize_role(self) -> "Message":
        """Map the OpenAI 'developer' role onto 'system' for Gemini compatibility."""
        if self.role == "developer":
            self.role = "system"
        return self


class Choice(BaseModel):
    """One completion candidate within a chat completion response."""

    # Position of this choice in the response's choices array.
    index: int
    message: Message
    finish_reason: str
    logprobs: dict[str, Any] | None = None


class FunctionCall(BaseModel):
    """Name and arguments of a function invocation."""

    name: str
    # Arguments travel as a raw JSON-encoded string, mirroring the OpenAI wire format.
    arguments: str


class ToolCall(BaseModel):
    """A single function-type tool invocation requested by the model."""

    id: str
    # Only the "function" tool type exists in this API surface.
    type: Literal["function"]
    function: FunctionCall


class ToolFunctionDefinition(BaseModel):
    """Definition of a callable function exposed to the model as a tool."""

    name: str
    description: str | None = None
    # JSON-Schema-style description of the function's parameters, if any.
    parameters: dict[str, Any] | None = None


class Tool(BaseModel):
    """Tool entry pairing a tool type with its function definition."""

    type: Literal["function"]
    function: ToolFunctionDefinition


class ToolChoiceFunctionDetail(BaseModel):
    """Names the specific function a forced tool choice targets."""

    name: str


class ToolChoiceFunction(BaseModel):
    """Tool choice that forces the model to call one named function."""

    type: Literal["function"]
    function: ToolChoiceFunctionDetail


class Usage(BaseModel):
    """Token accounting for a chat completion."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    # Optional per-category breakdowns (e.g. cached/reasoning tokens), when provided.
    prompt_tokens_details: dict[str, int] | None = None
    completion_tokens_details: dict[str, int] | None = None


class ModelData(BaseModel):
    """A single entry in the model listing."""

    id: str
    object: str = "model"
    # Unix timestamp of model creation.
    created: int
    owned_by: str = "google"


class ChatCompletionRequest(BaseModel):
    """Chat completion request model (OpenAI-compatible).

    `tool_choice` accepts the three string modes or an object forcing a
    specific function.
    """

    model: str
    messages: List[Message]
    stream: Optional[bool] = False
    user: Optional[str] = None
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    tools: Optional[List["Tool"]] = None
    # A single multi-valued Literal replaces the redundant union of three
    # single-valued Literals; the accepted values are unchanged.
    tool_choice: Optional[Union[Literal["none", "auto", "required"], "ToolChoiceFunction"]] = None
    response_format: Optional[Dict[str, Any]] = None


class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible chat completion response."""

    id: str
    object: str = "chat.completion"
    # Unix timestamp of response creation.
    created: int
    model: str
    choices: list[Choice]
    usage: Usage


class ModelListResponse(BaseModel):
    """Container returned by the model-listing endpoint."""

    object: str = "list"
    data: list[ModelData]


class HealthCheckResponse(BaseModel):
    """Health check response model."""

    ok: bool
    # Storage diagnostics keyed by name; values may be counts or status strings.
    storage: dict[str, str | int] | None = None
    # Per-client readiness flags keyed by client identifier.
    clients: dict[str, bool] | None = None
    error: str | None = None


class ConversationInStore(BaseModel):
    """Conversation record as persisted in the database."""

    created_at: datetime | None = None
    updated_at: datetime | None = None

    # The Gemini Web API cannot switch models once a conversation exists,
    # so the model is fixed at creation time.
    model: str = Field(..., description="Model used for the conversation")
    client_id: str = Field(..., description="Identifier of the Gemini client")
    metadata: list[str | None] = Field(
        ..., description="Metadata for Gemini API to locate the conversation"
    )
    messages: list[Message] = Field(..., description="Message contents in the conversation")


class ResponseInputContent(BaseModel):
    """Content item for Responses API input.

    Text, image, and file inputs share this one model; the optional fields
    are populated according to `type`.
    """

    type: Literal["input_text", "input_image", "input_file"]
    text: Optional[str] = None
    image_url: Optional[str] = None
    detail: Optional[Literal["auto", "low", "high"]] = None
    file_url: Optional[str] = None
    file_data: Optional[str] = None
    filename: Optional[str] = None
    annotations: List[Dict[str, Any]] = Field(default_factory=list)

    @model_validator(mode="before")
    @classmethod
    def normalize_output_text(cls, data: Any) -> Any:
        """Allow output_text (from previous turns) to be treated as input_text.

        Returns a shallow copy with the rewritten type instead of mutating the
        caller's dict in place — before-validators receive the raw payload, and
        mutating it would leak the change back to whoever built the request.
        """
        if isinstance(data, dict) and data.get("type") == "output_text":
            return {**data, "type": "input_text"}
        return data


class ResponseInputItem(BaseModel):
    """One input item for the Responses API."""

    # Defaults to "message"; the only accepted explicit value.
    type: Literal["message"] | None = "message"
    role: Literal["user", "assistant", "system", "developer"]
    content: str | list[ResponseInputContent]


class ResponseToolChoice(BaseModel):
    """Tool choice enforcing a specific tool in the Responses API."""

    type: Literal["function", "image_generation"]
    # Populated only when type is "function".
    function: ToolChoiceFunctionDetail | None = None


class ResponseImageTool(BaseModel):
    """Image generation tool specification accepted by the Responses API."""

    type: Literal["image_generation"]
    model: str | None = None
    output_format: str | None = None


class ResponseCreateRequest(BaseModel):
    """Request payload for the Responses API."""

    model: str
    # Either a bare prompt string or a list of structured input items.
    input: str | list[ResponseInputItem]
    instructions: str | list[ResponseInputItem] | None = None
    temperature: float | None = 0.7
    top_p: float | None = 1.0
    max_output_tokens: int | None = None
    stream: bool | None = False
    tool_choice: str | ResponseToolChoice | None = None
    tools: list[Tool | ResponseImageTool] | None = None
    store: bool | None = None
    user: str | None = None
    response_format: dict[str, Any] | None = None
    metadata: dict[str, Any] | None = None


class ResponseUsage(BaseModel):
    """Token accounting for a Responses API call."""

    input_tokens: int
    output_tokens: int
    total_tokens: int


class ResponseOutputContent(BaseModel):
    """One content item inside a Responses API output message."""

    type: Literal["output_text"]
    # Defaults to empty string rather than None so downstream text handling is uniform.
    text: str | None = ""
    annotations: list[dict[str, Any]] = Field(default_factory=list)


class ResponseOutputMessage(BaseModel):
    """Assistant message emitted by the Responses API."""

    id: str
    type: Literal["message"]
    role: Literal["assistant"]
    content: list[ResponseOutputContent]


class ResponseImageGenerationCall(BaseModel):
    """Record of an image-generation call emitted in Responses API output."""

    id: str
    type: Literal["image_generation_call"] = "image_generation_call"
    status: Literal["completed", "in_progress", "generating", "failed"] = "completed"
    # Generated image payload, when the call produced one.
    result: str | None = None
    output_format: str | None = None
    size: str | None = None
    revised_prompt: str | None = None


class ResponseToolCall(BaseModel):
    """Record of a tool call emitted in Responses API output."""

    id: str
    type: Literal["tool_call"] = "tool_call"
    status: Literal["in_progress", "completed", "failed", "requires_action"] = "completed"
    function: FunctionCall


class ResponseCreateResponse(BaseModel):
    """Response payload returned by the Responses API."""

    id: str
    object: Literal["response"] = "response"
    created_at: int
    model: str
    # Output may interleave assistant messages, image-generation calls,
    # and tool calls.
    output: list[ResponseOutputMessage | ResponseImageGenerationCall | ResponseToolCall]
    status: Literal[
        "in_progress",
        "completed",
        "failed",
        "incomplete",
        "cancelled",
        "requires_action",
    ] = "completed"
    tool_choice: str | ResponseToolChoice | None = None
    tools: list[Tool | ResponseImageTool] | None = None
    usage: ResponseUsage
    error: dict[str, Any] | None = None
    metadata: dict[str, Any] | None = None
    # Echo of the original request input, when stored.
    input: str | list[ResponseInputItem] | None = None



class AnthropicMessageRequest(BaseModel):
    """Anthropic Messages API request model."""

    model: str
    messages: list[Message]
    # Required by the Anthropic API, unlike the OpenAI-style request models.
    max_tokens: int
    metadata: dict[str, Any] | None = None
    stop_sequences: list[str] | None = None
    stream: bool | None = False
    # System prompt: either a plain string or a list of content blocks.
    system: str | list[dict[str, str]] | None = None
    temperature: float | None = 1.0
    tool_choice: dict[str, Any] | Literal["auto", "any", "none"] | None = None
    tools: list[dict[str, Any]] | None = None
    top_k: int | None = None
    top_p: float | None = None


# Resolve forward references ("ToolCall", "Tool", "ToolChoiceFunction") now
# that every referenced class has been defined.
for _model in (Message, ToolCall, ChatCompletionRequest, AnthropicMessageRequest):
    _model.model_rebuild()