File size: 10,494 Bytes
69fec20 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 |
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Field
# Pydantic v1/v2 compatibility helpers
def model_to_dict(model: BaseModel) -> Dict[str, Any]:
    """Serialize a Pydantic model to a plain dict on both Pydantic v1 and v2.

    Pydantic v2 renamed ``.dict()`` to ``.model_dump()``; this helper picks
    whichever method the installed version provides.
    """
    dump = getattr(model, "model_dump", None)
    if dump is not None:
        # Pydantic v2
        return dump()
    # Pydantic v1
    return model.dict()
# Common Models
class Model(BaseModel):
    """One model entry in an OpenAI-style model listing."""
    id: str
    object: str = "model"
    created: Optional[int] = None  # creation time as a Unix timestamp, if known
    owned_by: Optional[str] = "google"
class ModelList(BaseModel):
    """OpenAI-style ``GET /models`` response envelope."""
    object: str = "list"
    data: List[Model]
# OpenAI Models
class OpenAIToolFunction(BaseModel):
    """Function invocation payload inside an OpenAI tool call."""
    name: str
    arguments: str  # JSON string
class OpenAIToolCall(BaseModel):
    """A single tool call emitted by the assistant in an OpenAI response."""
    id: str
    type: str = "function"
    function: OpenAIToolFunction
class OpenAITool(BaseModel):
    """Tool definition supplied by the client in a chat request."""
    type: str = "function"
    function: Dict[str, Any]  # raw function schema (name/description/parameters)
class OpenAIChatMessage(BaseModel):
    """One message in an OpenAI chat conversation.

    ``content`` may be a plain string, a list of content parts
    (e.g. text/image blocks), or ``None`` for tool-call-only messages.
    """
    role: str
    content: Union[str, List[Dict[str, Any]], None] = None
    reasoning_content: Optional[str] = None  # reasoning/"thinking" text, if present
    name: Optional[str] = None
    tool_calls: Optional[List[OpenAIToolCall]] = None
    tool_call_id: Optional[str] = None  # for role="tool"
class OpenAIChatCompletionRequest(BaseModel):
    """OpenAI-compatible ``POST /chat/completions`` request body."""
    model: str
    messages: List[OpenAIChatMessage]
    stream: bool = False
    temperature: Optional[float] = Field(None, ge=0.0, le=2.0)
    top_p: Optional[float] = Field(None, ge=0.0, le=1.0)
    max_tokens: Optional[int] = Field(None, ge=1)
    stop: Optional[Union[str, List[str]]] = None
    frequency_penalty: Optional[float] = Field(None, ge=-2.0, le=2.0)
    presence_penalty: Optional[float] = Field(None, ge=-2.0, le=2.0)
    n: Optional[int] = Field(1, ge=1, le=128)
    seed: Optional[int] = None
    response_format: Optional[Dict[str, Any]] = None
    top_k: Optional[int] = Field(None, ge=1)  # non-standard OpenAI extension
    tools: Optional[List[OpenAITool]] = None
    tool_choice: Optional[Union[str, Dict[str, Any]]] = None

    class Config:
        extra = "allow"  # Allow additional fields not explicitly defined
# Generic chat-completion request model (OpenAI-compatible; also used for
# other inbound formats since the request class allows extra fields).
ChatCompletionRequest = OpenAIChatCompletionRequest
class OpenAIChatCompletionChoice(BaseModel):
    """One choice in a non-streaming chat completion response."""
    index: int
    message: OpenAIChatMessage
    finish_reason: Optional[str] = None
    logprobs: Optional[Dict[str, Any]] = None
class OpenAIChatCompletionResponse(BaseModel):
    """OpenAI-compatible non-streaming chat completion response."""
    id: str
    object: str = "chat.completion"
    created: int  # Unix timestamp
    model: str
    choices: List[OpenAIChatCompletionChoice]
    usage: Optional[Dict[str, int]] = None  # prompt/completion/total token counts
    system_fingerprint: Optional[str] = None
class OpenAIDelta(BaseModel):
    """Incremental message fragment in a streaming chunk."""
    role: Optional[str] = None
    content: Optional[str] = None
    reasoning_content: Optional[str] = None  # incremental reasoning text, if any
class OpenAIChatCompletionStreamChoice(BaseModel):
    """One choice inside a streaming chat completion chunk."""
    index: int
    delta: OpenAIDelta
    finish_reason: Optional[str] = None
    logprobs: Optional[Dict[str, Any]] = None
class OpenAIChatCompletionStreamResponse(BaseModel):
    """OpenAI-compatible SSE chunk for streaming chat completions."""
    id: str
    object: str = "chat.completion.chunk"
    created: int  # Unix timestamp
    model: str
    choices: List[OpenAIChatCompletionStreamChoice]
    system_fingerprint: Optional[str] = None
# Gemini Models
class GeminiPart(BaseModel):
    """One content part in a Gemini message (text, inline data, or file ref)."""
    text: Optional[str] = None
    inlineData: Optional[Dict[str, Any]] = None
    fileData: Optional[Dict[str, Any]] = None
    thought: Optional[bool] = False  # True marks this part as model "thinking" output
class GeminiContent(BaseModel):
    """A role-tagged turn in a Gemini conversation."""
    role: str
    parts: List[GeminiPart]
class GeminiSystemInstruction(BaseModel):
    """System instruction for a Gemini request (parts only, no role)."""
    parts: List[GeminiPart]
class GeminiImageConfig(BaseModel):
    """Image generation configuration.

    NOTE(review): fields here are snake_case while the rest of the Gemini
    models use camelCase — confirm this matches what the upstream API expects.
    """
    aspect_ratio: Optional[str] = None  # "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"
    image_size: Optional[str] = None  # "1K", "2K", "4K"
class GeminiGenerationConfig(BaseModel):
    """Gemini ``generationConfig`` sampling and output options."""
    temperature: Optional[float] = Field(None, ge=0.0, le=2.0)
    topP: Optional[float] = Field(None, ge=0.0, le=1.0)
    topK: Optional[int] = Field(None, ge=1)
    maxOutputTokens: Optional[int] = Field(None, ge=1)
    stopSequences: Optional[List[str]] = None
    responseMimeType: Optional[str] = None
    responseSchema: Optional[Dict[str, Any]] = None
    candidateCount: Optional[int] = Field(None, ge=1, le=8)
    seed: Optional[int] = None
    frequencyPenalty: Optional[float] = Field(None, ge=-2.0, le=2.0)
    presencePenalty: Optional[float] = Field(None, ge=-2.0, le=2.0)
    thinkingConfig: Optional[Dict[str, Any]] = None
    # Image-generation related parameters
    # NOTE(review): snake_case here vs camelCase above — verify against the API.
    response_modalities: Optional[List[str]] = None  # ["TEXT", "IMAGE"]
    image_config: Optional[GeminiImageConfig] = None
class GeminiSafetySetting(BaseModel):
    """One safety category/threshold pair for a Gemini request."""
    category: str
    threshold: str
class GeminiRequest(BaseModel):
    """Gemini ``generateContent`` request body."""
    contents: List[GeminiContent]
    systemInstruction: Optional[GeminiSystemInstruction] = None
    generationConfig: Optional[GeminiGenerationConfig] = None
    safetySettings: Optional[List[GeminiSafetySetting]] = None
    tools: Optional[List[Dict[str, Any]]] = None
    toolConfig: Optional[Dict[str, Any]] = None
    cachedContent: Optional[str] = None

    class Config:
        extra = "allow"  # pass through fields not declared here
class GeminiCandidate(BaseModel):
    """One generated candidate in a Gemini response."""
    content: GeminiContent
    finishReason: Optional[str] = None
    index: int = 0
    safetyRatings: Optional[List[Dict[str, Any]]] = None
    citationMetadata: Optional[Dict[str, Any]] = None
    tokenCount: Optional[int] = None
class GeminiUsageMetadata(BaseModel):
    """Token accounting reported by a Gemini response."""
    promptTokenCount: Optional[int] = None
    candidatesTokenCount: Optional[int] = None
    totalTokenCount: Optional[int] = None
class GeminiResponse(BaseModel):
    """Gemini ``generateContent`` response body."""
    candidates: List[GeminiCandidate]
    usageMetadata: Optional[GeminiUsageMetadata] = None
    modelVersion: Optional[str] = None
# Claude Models
class ClaudeContentBlock(BaseModel):
    """One content block in a Claude message; which optional fields apply
    depends on ``type``.
    """
    type: str  # "text", "image", "tool_use", "tool_result"
    text: Optional[str] = None
    source: Optional[Dict[str, Any]] = None  # for image type
    id: Optional[str] = None  # for tool_use
    name: Optional[str] = None  # for tool_use
    input: Optional[Dict[str, Any]] = None  # for tool_use
    tool_use_id: Optional[str] = None  # for tool_result
    content: Optional[Union[str, List[Dict[str, Any]]]] = None  # for tool_result
class ClaudeMessage(BaseModel):
    """One turn in a Claude conversation."""
    role: str  # "user" or "assistant"
    content: Union[str, List[ClaudeContentBlock]]
class ClaudeTool(BaseModel):
    """Tool definition supplied by the client in a Claude request."""
    name: str
    description: Optional[str] = None
    input_schema: Dict[str, Any]  # JSON Schema describing the tool's input
class ClaudeMetadata(BaseModel):
    """Optional request metadata for Claude."""
    user_id: Optional[str] = None
class ClaudeRequest(BaseModel):
    """Claude (Anthropic) ``/v1/messages`` request body.

    Note that ``max_tokens`` is required by the Claude API, unlike OpenAI.
    """
    model: str
    messages: List[ClaudeMessage]
    max_tokens: int = Field(..., ge=1)
    system: Optional[Union[str, List[Dict[str, Any]]]] = None
    temperature: Optional[float] = Field(None, ge=0.0, le=1.0)
    top_p: Optional[float] = Field(None, ge=0.0, le=1.0)
    top_k: Optional[int] = Field(None, ge=1)
    stop_sequences: Optional[List[str]] = None
    stream: bool = False
    metadata: Optional[ClaudeMetadata] = None
    tools: Optional[List[ClaudeTool]] = None
    tool_choice: Optional[Union[str, Dict[str, Any]]] = None

    class Config:
        extra = "allow"  # pass through fields not declared here
class ClaudeUsage(BaseModel):
    """Token accounting reported by a Claude response."""
    input_tokens: int
    output_tokens: int
class ClaudeResponse(BaseModel):
    """Claude ``/v1/messages`` non-streaming response body."""
    id: str
    type: str = "message"
    role: str = "assistant"
    content: List[ClaudeContentBlock]
    model: str
    stop_reason: Optional[str] = None
    stop_sequence: Optional[str] = None
    usage: ClaudeUsage
class ClaudeStreamEvent(BaseModel):
    """One SSE event in a Claude streaming response; populated fields
    depend on the event ``type``.
    """
    type: str  # "message_start", "content_block_start", "content_block_delta", "content_block_stop", "message_delta", "message_stop"
    message: Optional[ClaudeResponse] = None
    index: Optional[int] = None
    content_block: Optional[ClaudeContentBlock] = None
    delta: Optional[Dict[str, Any]] = None
    usage: Optional[ClaudeUsage] = None

    class Config:
        extra = "allow"
# Error Models
class APIError(BaseModel):
    """Error payload embedded in an :class:`ErrorResponse`."""
    message: str
    type: str = "api_error"
    code: Optional[int] = None
class ErrorResponse(BaseModel):
    """OpenAI-style error response envelope."""
    error: APIError
# Control Panel Models
class SystemStatus(BaseModel):
    """Control-panel snapshot of overall service health."""
    status: str
    timestamp: str
    credentials: Dict[str, int]  # counts per credential state
    config: Dict[str, Any]
    current_credential: str
class CredentialInfo(BaseModel):
    """Details about a single credential file shown in the control panel."""
    filename: str
    project_id: Optional[str] = None
    status: Dict[str, Any]
    size: Optional[int] = None  # file size in bytes — TODO confirm unit
    modified_time: Optional[str] = None
    error: Optional[str] = None
class LogEntry(BaseModel):
    """One log line surfaced to the control panel."""
    timestamp: str
    level: str
    message: str
    module: Optional[str] = None
class ConfigValue(BaseModel):
    """A single configuration entry exposed for editing."""
    key: str
    value: Any
    env_locked: bool = False  # True when the value is pinned by an environment variable
    description: Optional[str] = None
# Authentication Models
class AuthRequest(BaseModel):
    """Request to begin an authentication flow."""
    project_id: Optional[str] = None
    user_session: Optional[str] = None
class AuthResponse(BaseModel):
    """Result of an authentication step; optional fields are filled in
    depending on the outcome (redirect, success, or follow-up needed).
    """
    success: bool
    auth_url: Optional[str] = None  # OAuth URL the user must visit
    state: Optional[str] = None  # OAuth state token
    error: Optional[str] = None
    credentials: Optional[Dict[str, Any]] = None
    file_path: Optional[str] = None
    requires_manual_project_id: Optional[bool] = None
    requires_project_selection: Optional[bool] = None
    available_projects: Optional[List[Dict[str, str]]] = None
class CredentialStatus(BaseModel):
    """Health/usage status tracked for one credential.

    ``error_codes`` collects HTTP-style error codes seen for the credential.
    """
    disabled: bool = False
    # Use default_factory rather than a bare [] literal: it is the documented
    # Pydantic idiom for mutable defaults and behaves identically on v1 and v2
    # (Pydantic already copies mutable defaults per instance, but the factory
    # form is explicit and lint-clean).
    error_codes: List[int] = Field(default_factory=list)
    last_success: Optional[str] = None  # timestamp of the last successful use
# Web Routes Models
class LoginRequest(BaseModel):
    """Control-panel login request."""
    password: str
class AuthStartRequest(BaseModel):
    """Request to start an OAuth credential flow."""
    project_id: Optional[str] = None  # now optional
    mode: Optional[str] = "geminicli"  # credential mode: "geminicli" or "antigravity"
class AuthCallbackRequest(BaseModel):
    """Request to complete an OAuth flow after the provider redirects back."""
    project_id: Optional[str] = None  # now optional
    mode: Optional[str] = "geminicli"  # credential mode: "geminicli" or "antigravity"
class AuthCallbackUrlRequest(BaseModel):
    """Complete an OAuth flow by pasting the full callback URL manually."""
    callback_url: str  # full OAuth callback URL
    project_id: Optional[str] = None  # optional project ID
    mode: Optional[str] = "geminicli"  # credential mode: "geminicli" or "antigravity"
class CredFileActionRequest(BaseModel):
    """Apply an action to a single credential file."""
    filename: str
    action: str  # "enable", "disable", "delete"
class CredFileBatchActionRequest(BaseModel):
    """Apply one action to several credential files at once."""
    action: str  # "enable", "disable", "delete"
    filenames: List[str]  # filenames targeted by the batch operation
class ConfigSaveRequest(BaseModel):
    """Persist a full configuration mapping from the control panel."""
    config: dict
|