# antigravity-proxy / protocol_converter.py
# Author: asemxin
# Initial commit: Antigravity API Proxy (ec41d51)
"""
协议转换器 - OpenAI 格式 <-> Gemini 格式
"""
from typing import Dict, Any, List
from models import OpenAIChatRequest, MODEL_MAPPING
def convert_openai_to_gemini(request: OpenAIChatRequest) -> Dict[str, Any]:
    """
    Convert an OpenAI Chat Completion request into a Gemini request body.

    OpenAI format:
        {
            "model": "gpt-4",
            "messages": [
                {"role": "system", "content": "You are..."},
                {"role": "user", "content": "Hello"}
            ]
        }

    Gemini format:
        {
            "contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
            "systemInstruction": {"role": "user", "parts": [{"text": "You are..."}]},
            "generationConfig": {...}
        }

    Args:
        request: Parsed OpenAI-style chat request. Each message must expose
            ``role`` and ``content``; ``content`` is assumed to be a plain
            string (list-of-parts content is not handled here).

    Returns:
        A dict with ``contents``, ``systemInstruction`` and
        ``generationConfig`` keys ready to send to the Gemini API.
    """
    contents: List[Dict[str, Any]] = []
    system_texts: List[str] = []

    for msg in request.messages:
        if msg.role == "system":
            # Collect ALL system messages; previously later ones silently
            # overwrote earlier ones, keeping only the last.
            system_texts.append(msg.content)
        elif msg.role == "user":
            contents.append({
                "role": "user",
                "parts": [{"text": msg.content}]
            })
        elif msg.role == "assistant":
            # Gemini calls the assistant role "model".
            contents.append({
                "role": "model",
                "parts": [{"text": msg.content}]
            })
        # Unknown roles are dropped, matching the original behavior.

    # Gemini always gets a systemInstruction; an empty string when the
    # request carried no system message.
    system_instruction = {
        "role": "user",
        "parts": [{"text": "\n\n".join(system_texts)}]
    }

    # Use `is None` checks, NOT `or`: `temperature=0.0`, `top_p=0.0` and
    # `max_tokens=0` are falsy and `or` would wrongly replace them with the
    # defaults.
    generation_config: Dict[str, Any] = {
        "temperature": request.temperature if request.temperature is not None else 1.0,
        "topP": request.top_p if request.top_p is not None else 0.95,
        "maxOutputTokens": request.max_tokens if request.max_tokens is not None else 8192,
        "candidateCount": 1,
    }

    # Enable chain-of-thought for explicitly "thinking" models.
    model_lower = request.model.lower()
    if "thinking" in model_lower or "sonnet-3-7" in model_lower:
        generation_config["thinkingConfig"] = {
            "includeThoughts": True,
            "thinkingBudget": 8191,  # Google protocol limit < 8192
        }

    return {
        "contents": contents,
        "systemInstruction": system_instruction,
        "generationConfig": generation_config,
    }
def map_model_name(model: str) -> str:
    """
    Map an incoming model name onto one the Gemini API accepts.

    Resolution order:
      1. Exact match against MODEL_MAPPING (e.g. claude-sonnet-4-5 -> gemini-2.5-flash-preview).
      2. Gemini names pass straight through, gaining a "-preview" suffix
         for the handful of models that require it.
      3. Fuzzy match on Claude family keywords (opus / sonnet / haiku).
      4. Fallback: the name is returned unchanged.
    """
    # 1. Exact table lookup.
    try:
        return MODEL_MAPPING[model]
    except KeyError:
        pass

    lowered = model.lower()

    # 2. Gemini passthrough; only these specific models need "-preview".
    if lowered.startswith("gemini-"):
        needs_preview = {
            "gemini-3-flash", "gemini-3-pro",
            "gemini-2.5-pro", "gemini-2.5-flash",
        }
        if not lowered.endswith("-preview") and lowered in needs_preview:
            return model + "-preview"
        return model

    # 3. Claude family keywords.
    if "opus" in lowered:
        return "gemini-2.5-pro-preview"
    if "sonnet" in lowered:
        return "gemini-2.5-pro-preview" if "thinking" in lowered else "gemini-2.5-flash-preview"
    if "haiku" in lowered:
        return "gemini-2.5-flash-lite-preview"

    # 4. Nothing matched: hand the name back unchanged.
    return model
def convert_gemini_to_openai_chunk(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """
    Convert one Gemini streaming chunk into an OpenAI chat.completion.chunk.

    Gemini format:
        {
            "candidates": [{
                "content": {"parts": [{"text": "Hello"}]},
                "finishReason": "STOP"
            }]
        }

    OpenAI format:
        {
            "id": "chatcmpl-xxx",
            "object": "chat.completion.chunk",
            "choices": [{
                "index": 0,
                "delta": {"content": "Hello"},
                "finish_reason": null
            }]
        }

    Only the first candidate and its first part are inspected (one part per
    streamed chunk). Thought parts are passed through with ``thought`` /
    ``thoughtSignature`` markers on the delta.
    """
    import uuid
    from datetime import datetime

    # Candidates may sit at the top level or be wrapped in a "response" envelope.
    candidates = (
        gemini_data.get("candidates")
        or gemini_data.get("response", {}).get("candidates", [])
    )

    chunk_text = ""
    finish_reason = None
    thought_flag = False
    signature = None

    if candidates:
        first = candidates[0]
        parts = first.get("content", {}).get("parts", [])
        if parts:
            lead = parts[0]
            chunk_text = lead.get("text", "")
            thought_flag = lead.get("thought", False)
            signature = lead.get("thoughtSignature")
        # Translate Gemini's finish reason into OpenAI vocabulary;
        # unknown / absent reasons map to None (stream still in progress).
        finish_reason = {
            "STOP": "stop",
            "MAX_TOKENS": "length",
            "SAFETY": "content_filter",
        }.get(first.get("finishReason"))

    delta: Dict[str, Any] = {"content": chunk_text}
    if thought_flag:
        delta["thought"] = True
    if signature:
        delta["thoughtSignature"] = signature

    return {
        "id": gemini_data.get("responseId", f"chatcmpl-{uuid.uuid4().hex[:8]}"),
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "delta": delta,
            "finish_reason": finish_reason
        }]
    }
def convert_gemini_to_openai_response(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """
    Convert a non-streaming Gemini response into OpenAI chat.completion format.

    Args:
        gemini_data: Parsed Gemini JSON. Candidates may sit at the top level
            or be nested under a "response" envelope.
        model: Model name to echo back in the OpenAI response.

    Returns:
        An OpenAI-style chat.completion dict with a single choice and token
        usage taken from Gemini's ``usageMetadata`` (zeros when absent).
    """
    import uuid
    from datetime import datetime

    payload = gemini_data
    candidates = gemini_data.get("candidates", [])
    if not candidates:
        # Some endpoints wrap the payload in a "response" envelope.
        payload = gemini_data.get("response", {})
        candidates = payload.get("candidates", [])

    text = ""
    finish_reason = "stop"
    if candidates:
        candidate = candidates[0]
        parts = candidate.get("content", {}).get("parts", [])
        # Join ALL visible text parts: the original read only parts[0], which
        # dropped the real answer whenever Gemini split it across parts or
        # prepended a thought part. Thought parts are excluded from the
        # user-facing answer.
        text = "".join(
            p.get("text", "") for p in parts if not p.get("thought", False)
        )
        gemini_reason = candidate.get("finishReason")
        if gemini_reason == "MAX_TOKENS":
            finish_reason = "length"
        elif gemini_reason == "SAFETY":
            finish_reason = "content_filter"

    # Report real token counts when Gemini supplies them (previously always 0).
    usage_meta = (
        gemini_data.get("usageMetadata")
        or payload.get("usageMetadata")
        or {}
    )
    prompt_tokens = usage_meta.get("promptTokenCount", 0)
    completion_tokens = usage_meta.get("candidatesTokenCount", 0)
    total_tokens = usage_meta.get("totalTokenCount", prompt_tokens + completion_tokens)

    return {
        "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": text
            },
            "finish_reason": finish_reason
        }],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens
        }
    }