File size: 6,971 Bytes
ec41d51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
"""
协议转换器 - OpenAI 格式 <-> Gemini 格式
"""
from typing import Dict, Any, List
from models import OpenAIChatRequest, MODEL_MAPPING


def convert_openai_to_gemini(request: "OpenAIChatRequest") -> Dict[str, Any]:
    """
    Convert an OpenAI Chat Completion request into the Gemini request format.

    OpenAI format:
    {
        "model": "gpt-4",
        "messages": [
            {"role": "system", "content": "You are..."},
            {"role": "user", "content": "Hello"}
        ]
    }

    Gemini format:
    {
        "contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
        "systemInstruction": {"role": "user", "parts": [{"text": "You are..."}]},
        "generationConfig": {...}
    }

    Args:
        request: Parsed OpenAI-style chat request (model, messages, sampling params).

    Returns:
        Dict ready to be sent as a Gemini ``generateContent`` request body.
    """
    contents: List[Dict[str, Any]] = []
    system_texts: List[str] = []

    for msg in request.messages:
        if msg.role == "system":
            # Collect ALL system messages; previously only the last one
            # survived and earlier ones were silently dropped.
            system_texts.append(msg.content)
        elif msg.role == "user":
            contents.append({
                "role": "user",
                "parts": [{"text": msg.content}],
            })
        elif msg.role == "assistant":
            # OpenAI "assistant" corresponds to Gemini "model".
            contents.append({
                "role": "model",
                "parts": [{"text": msg.content}],
            })
        # Other roles (e.g. "tool") are ignored, as before.

    # Joining an empty list yields "", preserving the old
    # "empty system instruction" fallback.
    system_instruction = {
        "role": "user",
        "parts": [{"text": "\n\n".join(system_texts)}],
    }

    # Generation config. NOTE: use explicit ``is None`` checks — the old
    # ``request.temperature or 1.0`` turned a legitimate 0 / 0.0 into the
    # default because 0 is falsy.
    generation_config: Dict[str, Any] = {
        "temperature": 1.0 if request.temperature is None else request.temperature,
        "topP": 0.95 if request.top_p is None else request.top_p,
        "maxOutputTokens": 8192 if request.max_tokens is None else request.max_tokens,
        "candidateCount": 1,
    }

    # Enable chain-of-thought ("thinking") for models that request it.
    model_lower = request.model.lower()
    if "thinking" in model_lower or "sonnet-3-7" in model_lower:
        generation_config["thinkingConfig"] = {
            "includeThoughts": True,
            "thinkingBudget": 8191,  # Google protocol limit < 8192
        }

    return {
        "contents": contents,
        "systemInstruction": system_instruction,
        "generationConfig": generation_config,
    }


def map_model_name(model: str) -> str:
    """
    Map an incoming model name onto a name the Gemini API accepts.

    Resolution order:
    1. Exact match against MODEL_MAPPING (e.g. claude-sonnet-4-5 -> gemini-2.5-flash-preview).
    2. Gemini names pass through, with "-preview" appended for a few base names.
    3. Claude family names map by fuzzy keyword (opus / sonnet / haiku).
    4. Anything else is returned unchanged.
    """
    # 1. Exact mapping table wins.
    mapped = MODEL_MAPPING.get(model)
    if mapped is not None:
        return mapped

    lowered = model.lower()

    # 2. Gemini passthrough; a handful of base names require "-preview".
    if lowered.startswith("gemini-"):
        needs_preview = not lowered.endswith("-preview") and lowered in (
            "gemini-3-flash",
            "gemini-3-pro",
            "gemini-2.5-pro",
            "gemini-2.5-flash",
        )
        return model + "-preview" if needs_preview else model

    # 3. Claude fuzzy matching.
    if "opus" in lowered:
        return "gemini-2.5-pro-preview"
    if "sonnet" in lowered:
        return "gemini-2.5-pro-preview" if "thinking" in lowered else "gemini-2.5-flash-preview"
    if "haiku" in lowered:
        return "gemini-2.5-flash-lite-preview"

    # 4. Unknown names fall through untouched.
    return model


def convert_gemini_to_openai_chunk(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """
    Convert a Gemini streaming response payload into an OpenAI chunk.

    Gemini format:
    {
        "candidates": [{
            "content": {"parts": [{"text": "Hello"}]},
            "finishReason": "STOP"
        }]
    }

    OpenAI format:
    {
        "id": "chatcmpl-xxx",
        "object": "chat.completion.chunk",
        "choices": [{
            "index": 0,
            "delta": {"content": "Hello"},
            "finish_reason": null
        }]
    }

    Args:
        gemini_data: One decoded Gemini stream event (possibly nested under "response").
        model: Model name to echo back in the chunk.

    Returns:
        OpenAI-compatible ``chat.completion.chunk`` dict.
    """
    import uuid
    from datetime import datetime

    # Candidates may be at the top level or nested under "response".
    candidates = gemini_data.get("candidates", [])
    if not candidates:
        candidates = gemini_data.get("response", {}).get("candidates", [])

    text_pieces: List[str] = []
    finish_reason = None
    is_thought = False
    thought_signature = None

    if candidates:
        candidate = candidates[0]
        parts = candidate.get("content", {}).get("parts", [])

        # Merge EVERY part — the old code read only parts[0] and silently
        # dropped the rest when Gemini split a chunk into several parts
        # (e.g. a thought part plus visible text).
        for part in parts:
            text_pieces.append(part.get("text", ""))
            if part.get("thought", False):
                is_thought = True
            if part.get("thoughtSignature"):
                thought_signature = part["thoughtSignature"]

        # Map Gemini finish reasons onto OpenAI's vocabulary;
        # unknown/absent reasons stay None (stream still open).
        finish_reason = {
            "STOP": "stop",
            "MAX_TOKENS": "length",
            "SAFETY": "content_filter",
        }.get(candidate.get("finishReason"))

    # Build the OpenAI delta; thought metadata is passed through so the
    # client can distinguish reasoning tokens.
    delta: Dict[str, Any] = {"content": "".join(text_pieces)}
    if is_thought:
        delta["thought"] = True
    if thought_signature:
        delta["thoughtSignature"] = thought_signature

    return {
        # Prefer Gemini's responseId; fall back to a synthetic id.
        "id": gemini_data.get("responseId", f"chatcmpl-{uuid.uuid4().hex[:8]}"),
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "delta": delta,
            "finish_reason": finish_reason,
        }],
    }


def convert_gemini_to_openai_response(gemini_data: Dict[str, Any], model: str) -> Dict[str, Any]:
    """
    Convert a Gemini non-streaming response into the OpenAI chat.completion format.

    Args:
        gemini_data: Decoded Gemini response (candidates possibly nested under "response").
        model: Model name to echo back in the response.

    Returns:
        OpenAI-compatible ``chat.completion`` dict, including token usage
        when Gemini supplies ``usageMetadata`` (zeros otherwise).
    """
    import uuid
    from datetime import datetime

    # Candidates may be at the top level or nested under "response".
    response = gemini_data.get("response", {})
    candidates = gemini_data.get("candidates", []) or response.get("candidates", [])

    text = ""
    finish_reason = "stop"

    if candidates:
        candidate = candidates[0]
        parts = candidate.get("content", {}).get("parts", [])

        # Concatenate ALL parts — the old code read only parts[0] and
        # dropped any additional text parts.
        text = "".join(part.get("text", "") for part in parts)

        gemini_reason = candidate.get("finishReason")
        if gemini_reason == "MAX_TOKENS":
            finish_reason = "length"
        elif gemini_reason == "SAFETY":
            finish_reason = "content_filter"

    # Populate usage from Gemini's usageMetadata instead of hard-coded
    # zeros; absent fields still report 0 for backward compatibility.
    usage_meta = gemini_data.get("usageMetadata") or response.get("usageMetadata") or {}

    return {
        "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": text,
            },
            "finish_reason": finish_reason,
        }],
        "usage": {
            "prompt_tokens": usage_meta.get("promptTokenCount", 0),
            "completion_tokens": usage_meta.get("candidatesTokenCount", 0),
            "total_tokens": usage_meta.get("totalTokenCount", 0),
        },
    }