xidu committed
Commit 1c0114c · 1 Parent(s): ddc83ee

Deploy Go API to pro1 Space

Files changed (4):
  1. Dockerfile +7 -2
  2. README.md +9 -9
  3. go.mod +1 -1
  4. main.go +149 -43
Dockerfile CHANGED
@@ -4,13 +4,18 @@ FROM golang:1.21-alpine AS builder
 WORKDIR /app
 
 # Copy the go.mod and go.sum files
-# Change the module name from goapi to pro1
 COPY go.mod ./
-RUN go mod edit -module pro1 && go mod tidy
+
+# (This step is already covered by the later go mod tidy; kept here for clarity)
+# Download dependencies first to take advantage of Docker's layer cache
+RUN go mod download
 
 # Copy the source code
 COPY main.go ./
 
+# tidy ensures go.sum is up to date and removes unused dependencies
+RUN go mod tidy
+
 # Build the application
 RUN CGO_ENABLED=0 GOOS=linux go build -o /go-api
 
README.md CHANGED
@@ -1,16 +1,16 @@
 ---
-title: Gemini 2.5 Pro API Service
-emoji: 💎
-colorFrom: indigo
-colorTo: purple
+title: Pro1
+emoji: 🚀
+colorFrom: blue
+colorTo: green
 sdk: docker
 app_port: 7860
 ---
 
-This Space runs a high-performance API proxy for Google's Gemini, built in Go.
-It is specifically configured to **only support the `gemini-2.5-pro` model**.
+This Space runs a high-performance API proxy for Google's Gemini, built entirely in Go.
 
 ### Endpoints
-- **`POST /v1/chat/completions`**: Main endpoint for chat.
-- **`GET /v1/models`**: Lists available models (only gemini-2.5-pro).
-- **`GET /health`**: Health check.
+- **`POST /v1/chat/completions`**: Main endpoint for chat, supports streaming & non-streaming.
+- **`GET /v1/models`**: Lists available models.
+- **`GET /health`**: Health check.
+- **`GET /`**: API Info.
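
For reference, a request against the endpoints listed above could look like the following minimal Go sketch. It assumes the Space is reachable at http://localhost:7860 (the `app_port` above) and that the request body follows the OpenAI-compatible shape this proxy parses; any field beyond `model` and `messages` is an assumption, not something spelled out in this commit.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// OpenAI-style chat request; "model" and "messages" are the fields the handler reads.
	body, _ := json.Marshal(map[string]interface{}{
		"model": "gemini-2.5-pro",
		"messages": []map[string]string{
			{"role": "user", "content": "Hello"},
		},
	})

	// Base URL is an assumption: the Space exposes app_port 7860.
	resp, err := http.Post("http://localhost:7860/v1/chat/completions",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Print the raw chat.completion JSON returned by the proxy.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```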
go.mod CHANGED
@@ -1,4 +1,4 @@
-module pro1
+module goapi
 
 go 1.21
 
main.go CHANGED
@@ -27,8 +27,22 @@ var apiKeys = []string{
 	// Add more keys here
 }
 
-// Supported model definitions (modified as requested)
+// Supported model definitions
 var supportedModels = []ModelInfo{
+	{
+		ID:          "gemini-2.5-flash-preview-05-20",
+		Object:      "model",
+		Created:     time.Now().Unix(),
+		OwnedBy:     "google",
+		Description: "Gemini 2.5 Flash Preview - latest experimental model",
+	},
+	{
+		ID:          "gemini-2.5-flash",
+		Object:      "model",
+		Created:     time.Now().Unix(),
+		OwnedBy:     "google",
+		Description: "gemini-2.5-flash - stable, classic production model",
+	},
 	{
 		ID:     "gemini-2.5-pro",
 		Object: "model",
@@ -38,17 +52,32 @@ var supportedModels = []ModelInfo{
 	},
 }
 
-// Map OpenAI model names to Gemini model names (modified as requested)
+// Map OpenAI model names to Gemini model names.
+// As requested, keys and values are identical; no conversion is performed.
 var modelMapping = map[string]string{
-	"gemini-2.5-pro": "gemini-2.5-pro",
+	"gemini-2.5-flash-preview-05-20": "gemini-2.5-flash-preview-05-20",
+	"gemini-2.5-flash":               "gemini-2.5-flash",
+	"gemini-2.5-pro":                 "gemini-2.5-pro",
 }
 
 // Safety settings (all disabled)
 var safetySettings = []*genai.SafetySetting{
-	{Category: genai.HarmCategoryHarassment, Threshold: genai.HarmBlockNone},
-	{Category: genai.HarmCategoryHateSpeech, Threshold: genai.HarmBlockNone},
-	{Category: genai.HarmCategorySexuallyExplicit, Threshold: genai.HarmBlockNone},
-	{Category: genai.HarmCategoryDangerousContent, Threshold: genai.HarmBlockNone},
+	{
+		Category:  genai.HarmCategoryHarassment,
+		Threshold: genai.HarmBlockNone,
+	},
+	{
+		Category:  genai.HarmCategoryHateSpeech,
+		Threshold: genai.HarmBlockNone,
+	},
+	{
+		Category:  genai.HarmCategorySexuallyExplicit,
+		Threshold: genai.HarmBlockNone,
+	},
+	{
+		Category:  genai.HarmCategoryDangerousContent,
+		Threshold: genai.HarmBlockNone,
+	},
 }
 
 const maxRetries = 3
@@ -127,25 +156,30 @@ func getRandomAPIKey() string {
 	return apiKeys[r.Intn(len(apiKeys))]
 }
 
+// convertMessages converts OpenAI-format messages into Gemini-format history plus the final user prompt
 func convertMessages(messages []ChatMessage) (history []*genai.Content, lastPrompt []genai.Part, systemInstruction *genai.Content) {
 	if len(messages) == 0 {
 		return nil, nil, nil
 	}
+
 	for i, msg := range messages {
 		var role string
 		if msg.Role == "system" {
 			systemInstruction = &genai.Content{Parts: []genai.Part{genai.Text(msg.Content)}}
 			continue
 		}
+
 		if i == len(messages)-1 && msg.Role == "user" {
 			lastPrompt = append(lastPrompt, genai.Text(msg.Content))
 			continue
 		}
+
 		if msg.Role == "assistant" {
 			role = "model"
 		} else {
 			role = "user"
 		}
+
 		history = append(history, &genai.Content{
 			Role:  role,
 			Parts: []genai.Part{genai.Text(msg.Content)},
@@ -159,32 +193,34 @@ func chatCompletionsHandler(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Only POST is supported", http.StatusMethodNotAllowed)
 		return
 	}
+
 	var req ChatCompletionRequest
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
 		http.Error(w, fmt.Sprintf("failed to parse request body: %v", err), http.StatusBadRequest)
 		return
 	}
 
-	geminiModel, ok := modelMapping[req.Model]
-	if !ok {
-		http.Error(w, fmt.Sprintf("unsupported model: %s; this endpoint only supports 'gemini-2.5-pro'", req.Model), http.StatusBadRequest)
-		return
-	}
-	log.Printf("Received model request: '%s', using mapped model: '%s'", req.Model, geminiModel)
+	// As requested, use the model name from the request as-is
+	modelName := req.Model
+	log.Printf("Received model request: '%s'; using the name directly.", modelName)
+
 
 	history, lastPrompt, systemInstruction := convertMessages(req.Messages)
+
 	var lastErr error
 	usedKeys := make(map[string]bool)
 
 	for i := 0; i < maxRetries; i++ {
 		ctx := context.Background()
 		apiKey := getRandomAPIKey()
+
 		if len(usedKeys) < len(apiKeys) {
 			for usedKeys[apiKey] {
 				apiKey = getRandomAPIKey()
 			}
 		}
 		usedKeys[apiKey] = true
+
 		log.Printf("Attempt %d, using key: ...%s", i+1, apiKey[len(apiKey)-4:])
 
 		client, err := genai.NewClient(ctx, option.WithAPIKey(apiKey))
@@ -195,7 +231,7 @@ func chatCompletionsHandler(w http.ResponseWriter, r *http.Request) {
 		}
 		defer client.Close()
 
-		model := client.GenerativeModel(geminiModel)
+		model := client.GenerativeModel(modelName)
 		model.SystemInstruction = systemInstruction
 		model.SafetySettings = safetySettings
 		model.SetTemperature(req.Temperature)
@@ -216,10 +252,12 @@ func chatCompletionsHandler(w http.ResponseWriter, r *http.Request) {
 		if err == nil {
 			return
 		}
+
 		lastErr = err
 		log.Printf("Attempt %d failed: %v", i+1, err)
 		time.Sleep(1 * time.Second)
 	}
+
 	http.Error(w, fmt.Sprintf("All retries failed: %v", lastErr), http.StatusInternalServerError)
 }
 
@@ -231,8 +269,12 @@ func handleStream(w http.ResponseWriter, ctx context.Context, chat *genai.ChatSe
 	iter := chat.SendMessageStream(ctx, prompt...)
 	for {
 		resp, err := iter.Next()
-		if err == iterator.Done { break }
-		if err != nil { return fmt.Errorf("streaming content generation failed: %v", err) }
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("streaming content generation failed: %v", err)
+		}
 
 		var contentBuilder strings.Builder
 		for _, part := range resp.Candidates[0].Content.Parts {
@@ -246,61 +288,111 @@ func handleStream(w http.ResponseWriter, ctx context.Context, chat *genai.ChatSe
 			Object:  "chat.completion.chunk",
 			Created: time.Now().Unix(),
 			Model:   modelID,
-			Choices: []StreamChoice{{Index: 0, Delta: ChatMessage{Role: "assistant", Content: contentBuilder.String()}}},
+			Choices: []StreamChoice{
+				{
+					Index: 0,
+					Delta: ChatMessage{
+						Role:    "assistant",
+						Content: contentBuilder.String(),
+					},
+				},
+			},
 		}
+
 		var buf bytes.Buffer
-		if err := json.NewEncoder(&buf).Encode(chunk); err != nil { return fmt.Errorf("failed to serialize stream chunk: %v", err) }
+		if err := json.NewEncoder(&buf).Encode(chunk); err != nil {
+			return fmt.Errorf("failed to serialize stream chunk: %v", err)
+		}
+
 		fmt.Fprintf(w, "data: %s\n\n", buf.String())
-		if flusher, ok := w.(http.Flusher); ok { flusher.Flush() }
+		if flusher, ok := w.(http.Flusher); ok {
+			flusher.Flush()
+		}
 	}
 
 	finishReason := "stop"
 	doneChunk := ChatCompletionStreamResponse{
-		ID: fmt.Sprintf("chatcmpl-%d-done", time.Now().Unix()),
-		Object: "chat.completion.chunk",
+		ID:      fmt.Sprintf("chatcmpl-%d-done", time.Now().Unix()),
+		Object:  "chat.completion.chunk",
 		Created: time.Now().Unix(),
-		Model: modelID,
-		Choices: []StreamChoice{{Index: 0, FinishReason: &finishReason}},
+		Model:   modelID,
+		Choices: []StreamChoice{
+			{
+				Index:        0,
+				FinishReason: &finishReason,
+			},
+		},
 	}
 	var buf bytes.Buffer
 	json.NewEncoder(&buf).Encode(doneChunk)
 	fmt.Fprintf(w, "data: %s\n\n", buf.String())
 	fmt.Fprintf(w, "data: [DONE]\n\n")
-	if flusher, ok := w.(http.Flusher); ok { flusher.Flush() }
+	if flusher, ok := w.(http.Flusher); ok {
+		flusher.Flush()
+	}
+
 	return nil
 }
 
 func handleNonStream(w http.ResponseWriter, ctx context.Context, model *genai.GenerativeModel, chat *genai.ChatSession, prompt []genai.Part, modelID string) error {
 	resp, err := chat.SendMessage(ctx, prompt...)
-	if err != nil { return fmt.Errorf("content generation failed: %v", err) }
+	if err != nil {
+		return fmt.Errorf("content generation failed: %v", err)
+	}
 
 	var contentBuilder strings.Builder
 	if len(resp.Candidates) > 0 && resp.Candidates[0].Content != nil {
 		for _, part := range resp.Candidates[0].Content.Parts {
-			if txt, ok := part.(genai.Text); ok { contentBuilder.WriteString(string(txt)) }
+			if txt, ok := part.(genai.Text); ok {
+				contentBuilder.WriteString(string(txt))
+			}
 		}
 	}
 
+	// Count tokens
 	var promptParts []genai.Part
-	for _, c := range chat.History { promptParts = append(promptParts, c.Parts...) }
+	for _, c := range chat.History {
+		promptParts = append(promptParts, c.Parts...)
+	}
 	promptParts = append(promptParts, prompt...)
+
 	promptTokenCount, err := model.CountTokens(ctx, promptParts...)
-	if err != nil { return fmt.Errorf("failed to count prompt tokens: %v", err) }
+	if err != nil {
+		return fmt.Errorf("failed to count prompt tokens: %v", err)
+	}
+
 	completionTokenCount, err := model.CountTokens(ctx, resp.Candidates[0].Content.Parts...)
-	if err != nil { return fmt.Errorf("failed to count completion tokens: %v", err) }
+	if err != nil {
+		return fmt.Errorf("failed to count completion tokens: %v", err)
+	}
 
 	response := ChatCompletionResponse{
 		ID:      fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
 		Object:  "chat.completion",
 		Created: time.Now().Unix(),
 		Model:   modelID,
-		Choices: []Choice{{Index: 0, Message: ChatMessage{Role: "assistant", Content: contentBuilder.String()}, FinishReason: "stop"}},
-		Usage:   Usage{PromptTokens: int(promptTokenCount.TotalTokens), CompletionTokens: int(completionTokenCount.TotalTokens), TotalTokens: int(promptTokenCount.TotalTokens) + int(completionTokenCount.TotalTokens)},
+		Choices: []Choice{
+			{
+				Index: 0,
+				Message: ChatMessage{
+					Role:    "assistant",
+					Content: contentBuilder.String(),
+				},
				FinishReason: "stop",
+			},
+		},
+		Usage: Usage{
+			PromptTokens:     int(promptTokenCount.TotalTokens),
+			CompletionTokens: int(completionTokenCount.TotalTokens),
+			TotalTokens:      int(promptTokenCount.TotalTokens) + int(completionTokenCount.TotalTokens),
+		},
 	}
+
 	w.Header().Set("Content-Type", "application/json")
 	return json.NewEncoder(w).Encode(response)
 }
 
+
 func modelsHandler(w http.ResponseWriter, r *http.Request) {
 	resp := ModelListResponse{
 		Object: "list",
@@ -312,10 +404,14 @@ func modelsHandler(w http.ResponseWriter, r *http.Request) {
 
 func rootHandler(w http.ResponseWriter, r *http.Request) {
 	info := map[string]interface{}{
-		"name":        "Gemini 2.5 Pro API Service (Go Version)",
-		"version":     "1.3.1",
-		"description": "Official Google Gemini API service, configured to support only gemini-2.5-pro",
-		"endpoints":   map[string]string{"models": "/v1/models", "chat": "/v1/chat/completions", "health": "/health"},
+		"name":        "Gemini Official API (Go Version)",
+		"version":     "1.3.0",
+		"description": "Official Google Gemini API service",
+		"endpoints": map[string]string{
+			"models": "/v1/models",
+			"chat":   "/v1/chat/completions",
+			"health": "/health",
+		},
 	}
 	w.Header().Set("Content-Type", "application/json")
 	json.NewEncoder(w).Encode(info)
@@ -323,13 +419,15 @@ func rootHandler(w http.ResponseWriter, r *http.Request) {
 
 func healthHandler(w http.ResponseWriter, r *http.Request) {
 	var modelIDs []string
-	for _, m := range supportedModels { modelIDs = append(modelIDs, m.ID) }
+	for _, m := range supportedModels {
+		modelIDs = append(modelIDs, m.ID)
+	}
 	health := map[string]interface{}{
-		"status": "healthy",
-		"timestamp": time.Now().Unix(),
-		"api": "gemini-official-go",
+		"status":           "healthy",
+		"timestamp":        time.Now().Unix(),
+		"api":              "gemini-official-go",
 		"available_models": modelIDs,
-		"version": "1.3.1",
+		"version":          "1.3.0",
 	}
 	w.Header().Set("Content-Type", "application/json")
 	json.NewEncoder(w).Encode(health)
@@ -337,11 +435,13 @@ func healthHandler(w http.ResponseWriter, r *http.Request) {
 
 func main() {
 	mux := http.NewServeMux()
+
 	mux.HandleFunc("/", rootHandler)
 	mux.HandleFunc("/health", healthHandler)
 	mux.HandleFunc("/v1/models", modelsHandler)
 	mux.HandleFunc("/v1/chat/completions", chatCompletionsHandler)
-
+	mux.HandleFunc("/v1/chat/completions/v1/models", modelsHandler)
+
 	c := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
 		AllowedMethods: []string{"GET", "POST", "OPTIONS"},
@@ -351,8 +451,14 @@ func main() {
 	handler := c.Handler(mux)
 
 	port := "7860"
-	log.Println("🚀 Starting Gemini 2.5 Pro API server (Go version)")
-	log.Printf("📊 Sole supported model: %v", supportedModels[0].ID)
+	log.Println("🚀 Starting official Gemini API server (Go version)")
+	log.Printf("📊 Supported models: %v", func() []string {
+		var ids []string
+		for _, m := range supportedModels {
+			ids = append(ids, m.ID)
+		}
+		return ids
+	}())
 	log.Printf("🔑 %d API keys configured", len(apiKeys))
 	log.Println("🔄 Automatic retry and key rotation enabled")
 	log.Printf("🔗 Server listening on http://0.0.0.0:%s", port)