| """
|
| 协议转换器 - OpenAI 格式 <-> Gemini 格式
|
| """
|
| from typing import Dict, Any, List
|
| from models import OpenAIChatRequest, MODEL_MAPPING
|
|
|
|
|
def convert_openai_to_gemini(request: "OpenAIChatRequest") -> Dict[str, Any]:
    """Convert an OpenAI Chat Completion request into a Gemini request body.

    OpenAI shape:
        {"model": "gpt-4",
         "messages": [{"role": "system", "content": "You are..."},
                      {"role": "user", "content": "Hello"}]}

    Gemini shape:
        {"contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
         "systemInstruction": {"role": "user", "parts": [{"text": "You are..."}]},
         "generationConfig": {...}}

    Args:
        request: Parsed OpenAI-style chat request.

    Returns:
        A dict suitable as the JSON body of a Gemini generateContent call.
    """
    contents: List[Dict[str, Any]] = []
    system_texts: List[str] = []

    for msg in request.messages:
        # Guard against None content (e.g. assistant messages without text).
        text = msg.content or ""
        if msg.role == "system":
            # Collect every system message; previously each one overwrote the
            # last, so only the final system message survived.
            system_texts.append(text)
        elif msg.role == "user":
            contents.append({"role": "user", "parts": [{"text": text}]})
        elif msg.role == "assistant":
            # Gemini names the assistant role "model".
            contents.append({"role": "model", "parts": [{"text": text}]})
        # Other roles are intentionally dropped, matching previous behavior.

    # Always send an instruction; an empty join yields the empty-text default.
    system_instruction = {
        "role": "user",
        "parts": [{"text": "\n\n".join(system_texts)}],
    }

    # Use `is not None` so explicit zero values are honored; the previous
    # `x or default` form silently replaced temperature=0.0 with 1.0.
    generation_config: Dict[str, Any] = {
        "temperature": request.temperature if request.temperature is not None else 1.0,
        "topP": request.top_p if request.top_p is not None else 0.95,
        "maxOutputTokens": request.max_tokens if request.max_tokens is not None else 8192,
        "candidateCount": 1,
    }

    model_lower = request.model.lower()
    if "thinking" in model_lower or "sonnet-3-7" in model_lower:
        # Thinking-capable model aliases get reasoning traces enabled.
        generation_config["thinkingConfig"] = {
            "includeThoughts": True,
            "thinkingBudget": 8191,
        }

    return {
        "contents": contents,
        "systemInstruction": system_instruction,
        "generationConfig": generation_config,
    }
|
|
|
|
|
def map_model_name(model: str) -> str:
    """Resolve a client-supplied model name to one the Gemini API serves.

    Resolution order:
      1. Exact hit in MODEL_MAPPING (e.g. claude-sonnet-4-5 -> gemini-2.5-flash-preview).
      2. Names already starting with "gemini-": known base names get a
         "-preview" suffix, everything else passes through unchanged.
      3. Fuzzy keyword match on the Claude family (opus / sonnet / haiku).
      4. Anything else is returned as-is.
    """
    if model in MODEL_MAPPING:
        return MODEL_MAPPING[model]

    lowered = model.lower()

    if lowered.startswith("gemini-"):
        known_bases = (
            "gemini-3-flash",
            "gemini-3-pro",
            "gemini-2.5-pro",
            "gemini-2.5-flash",
        )
        # Only append "-preview" to bases we know need it.
        if not lowered.endswith("-preview") and lowered in known_bases:
            return model + "-preview"
        return model

    if "opus" in lowered:
        return "gemini-2.5-pro-preview"
    if "sonnet" in lowered:
        # Thinking variants of sonnet route to the stronger pro model.
        return "gemini-2.5-pro-preview" if "thinking" in lowered else "gemini-2.5-flash-preview"
    if "haiku" in lowered:
        return "gemini-2.5-flash-lite-preview"

    return model
|
|
|
|
|
def convert_gemini_to_openai_chunk(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """Translate one Gemini streaming event into an OpenAI chat.completion.chunk.

    Gemini event:
        {"candidates": [{
            "content": {"parts": [{"text": "Hello"}]},
            "finishReason": "STOP"
        }]}

    OpenAI chunk:
        {"id": "chatcmpl-xxx",
         "object": "chat.completion.chunk",
         "choices": [{"index": 0, "delta": {"content": "Hello"},
                      "finish_reason": null}]}
    """
    import uuid
    from datetime import datetime

    # Some wrappers nest the payload under a top-level "response" key.
    candidates = gemini_data.get("candidates", []) or gemini_data.get("response", {}).get("candidates", [])

    chunk_text = ""
    stop_reason = None
    thought_flag = False
    signature = None

    if candidates:
        first = candidates[0]
        parts = first.get("content", {}).get("parts", [])
        if parts:
            lead = parts[0]
            chunk_text = lead.get("text", "")
            thought_flag = lead.get("thought", False)
            signature = lead.get("thoughtSignature")

        # Map Gemini finish reasons onto OpenAI vocabulary; anything
        # unrecognized (or absent) stays None, i.e. "still streaming".
        stop_reason = {
            "STOP": "stop",
            "MAX_TOKENS": "length",
            "SAFETY": "content_filter",
        }.get(first.get("finishReason"))

    delta: Dict[str, Any] = {"content": chunk_text}
    if thought_flag:
        delta["thought"] = True
    if signature:
        delta["thoughtSignature"] = signature

    return {
        "id": gemini_data.get("responseId", f"chatcmpl-{uuid.uuid4().hex[:8]}"),
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "delta": delta,
            "finish_reason": stop_reason,
        }],
    }
|
|
|
|
|
def convert_gemini_to_openai_response(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """Translate a non-streaming Gemini response into OpenAI chat.completion format.

    Improvements over the previous conversion:
      * All text parts are concatenated instead of reading only ``parts[0]``;
        with thinking models the first part can be the reasoning trace, so the
        old code could return the thought and drop the actual answer.
      * Parts flagged ``"thought": True`` are excluded from the visible content.
      * Token counts are filled from Gemini's ``usageMetadata`` when present
        (defaults remain 0, matching the previous hard-coded values).
    """
    import uuid
    from datetime import datetime

    # Some wrappers nest the payload under a top-level "response" key.
    payload = gemini_data
    candidates = payload.get("candidates", [])
    if not candidates:
        payload = gemini_data.get("response", {})
        candidates = payload.get("candidates", [])

    text = ""
    finish_reason = "stop"  # non-streaming default

    if candidates:
        candidate = candidates[0]
        parts = candidate.get("content", {}).get("parts", [])

        # Join every non-thought text part; previously only parts[0] was used.
        text = "".join(p.get("text", "") for p in parts if not p.get("thought"))

        gemini_reason = candidate.get("finishReason")
        if gemini_reason == "MAX_TOKENS":
            finish_reason = "length"
        elif gemini_reason == "SAFETY":
            finish_reason = "content_filter"

    # Field names per the Gemini API's usageMetadata object.
    usage_md = payload.get("usageMetadata") or {}
    usage = {
        "prompt_tokens": usage_md.get("promptTokenCount", 0),
        "completion_tokens": usage_md.get("candidatesTokenCount", 0),
        "total_tokens": usage_md.get("totalTokenCount", 0),
    }

    return {
        "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": text,
            },
            "finish_reason": finish_reason,
        }],
        "usage": usage,
    }
|
|
|