sanbo1200 commited on
Commit
d48b38e
·
verified ·
1 Parent(s): 35373d0

Update main.go

Browse files
Files changed (1) hide show
  1. main.go +1329 -134
main.go CHANGED
@@ -11,70 +11,71 @@ import (
11
  "os"
12
  "regexp"
13
  "strings"
 
14
  "time"
15
  )
16
 
17
  // 配置变量(从环境变量读取)
18
  var (
19
- UPSTREAM_URL string
20
- DEFAULT_KEY string
21
- UPSTREAM_TOKEN string
22
- MODEL_NAME string
23
- PORT string
24
- DEBUG_MODE bool
25
- DEFAULT_STREAM bool
 
 
 
 
26
  )
27
 
28
- // 思考内容处理策略
29
- const (
30
- THINK_TAGS_MODE = "strip" // strip: 去除<details>标签;think: 转为<think>标签;raw: 保留原样
31
- )
32
-
33
- // 伪装前端头部(来自抓包)
34
- const (
35
- X_FE_VERSION = "prod-fe-1.0.70"
36
- BROWSER_UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0"
37
- SEC_CH_UA = "\"Not;A=Brand\";v=\"99\", \"Microsoft Edge\";v=\"139\", \"Chromium\";v=\"139\""
38
- SEC_CH_UA_MOB = "?0"
39
- SEC_CH_UA_PLAT = "\"Windows\""
40
- ORIGIN_BASE = "https://chat.z.ai"
41
- )
42
-
43
- // 匿名token开关
44
- const ANON_TOKEN_ENABLED = true
45
-
46
- // 从环境变量初始化配置
47
- func initConfig() {
48
- UPSTREAM_URL = getEnv("UPSTREAM_URL", "https://chat.z.ai/api/chat/completions")
49
- DEFAULT_KEY = getEnv("DEFAULT_KEY", "sk-your-key")
50
- UPSTREAM_TOKEN = getEnv("UPSTREAM_TOKEN", "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMxNmJjYjQ4LWZmMmYtNGExNS04NTNkLWYyYTI5YjY3ZmYwZiIsImVtYWlsIjoiR3Vlc3QtMTc1NTg0ODU4ODc4OEBndWVzdC5jb20ifQ.PktllDySS3trlyuFpTeIZf-7hl8Qu1qYF3BxjgIul0BrNux2nX9hVzIjthLXKMWAf9V0qM8Vm_iyDqkjPGsaiQ")
51
- MODEL_NAME = getEnv("MODEL_NAME", "GLM-4.5")
52
- PORT = getEnv("PORT", "7860")
53
 
54
- // 处理PORT格式,确保有冒号前缀
55
- if !strings.HasPrefix(PORT, ":") {
56
- PORT = ":" + PORT
57
- }
 
 
 
 
 
 
58
 
59
- DEBUG_MODE = getEnv("DEBUG_MODE", "true") == "true"
60
- DEFAULT_STREAM = getEnv("DEFAULT_STREAM", "true") == "true"
 
 
61
  }
62
 
63
- // 获取环境变量,如果不存在则返回默认值
64
- func getEnv(key, defaultValue string) string {
65
- if value := os.Getenv(key); value != "" {
66
- return value
67
- }
68
- return defaultValue
 
 
 
 
69
  }
70
 
71
  // OpenAI 请求结构
72
  type OpenAIRequest struct {
73
- Model string `json:"model"`
74
- Messages []Message `json:"messages"`
75
- Stream bool `json:"stream,omitempty"`
76
- Temperature float64 `json:"temperature,omitempty"`
77
- MaxTokens int `json:"max_tokens,omitempty"`
 
78
  }
79
 
80
  type Message struct {
@@ -158,12 +159,320 @@ type ModelsResponse struct {
158
  }
159
 
160
  type Model struct {
161
- ID string `json:"id"`
162
  Object string `json:"object"`
 
163
  Created int64 `json:"created"`
164
  OwnedBy string `json:"owned_by"`
165
  }
166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  // debug日志函数
168
  func debugLog(format string, args ...interface{}) {
169
  if DEBUG_MODE {
@@ -188,7 +497,6 @@ func getAnonymousToken() (string, error) {
188
  req.Header.Set("sec-ch-ua-platform", SEC_CH_UA_PLAT)
189
  req.Header.Set("Origin", ORIGIN_BASE)
190
  req.Header.Set("Referer", ORIGIN_BASE+"/")
191
-
192
  resp, err := client.Do(req)
193
  if err != nil {
194
  return "", err
@@ -212,21 +520,870 @@ func getAnonymousToken() (string, error) {
212
  func main() {
213
  // 初始化配置
214
  initConfig()
215
-
216
- // http.HandleFunc("/v1/models", handleModels)
217
- // http.HandleFunc("/v1/chat/completions", handleChatCompletions)
 
 
 
218
  http.HandleFunc("/api/v1/models", handleModels)
219
  http.HandleFunc("/api/v1/chat/completions", handleChatCompletions)
220
- http.HandleFunc("/", handleOptions)
221
-
 
 
 
 
 
 
 
222
  log.Printf("OpenAI兼容API服务器启动在端口%s", PORT)
223
- log.Printf("模型: %s", MODEL_NAME)
224
  log.Printf("上游: %s", UPSTREAM_URL)
225
  log.Printf("Debug模式: %v", DEBUG_MODE)
226
  log.Printf("默认流式响应: %v", DEFAULT_STREAM)
 
 
227
  log.Fatal(http.ListenAndServe(PORT, nil))
228
  }
229
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
  func handleOptions(w http.ResponseWriter, r *http.Request) {
231
  setCORSHeaders(w)
232
  if r.Method == "OPTIONS" {
@@ -243,6 +1400,7 @@ func setCORSHeaders(w http.ResponseWriter) {
243
  w.Header().Set("Access-Control-Allow-Credentials", "true")
244
  }
245
 
 
246
  func handleModels(w http.ResponseWriter, r *http.Request) {
247
  setCORSHeaders(w)
248
  if r.Method == "OPTIONS" {
@@ -252,14 +1410,7 @@ func handleModels(w http.ResponseWriter, r *http.Request) {
252
 
253
  response := ModelsResponse{
254
  Object: "list",
255
- Data: []Model{
256
- {
257
- ID: MODEL_NAME,
258
- Object: "model",
259
- Created: time.Now().Unix(),
260
- OwnedBy: "z.ai",
261
- },
262
- },
263
  }
264
 
265
  w.Header().Set("Content-Type", "application/json")
@@ -267,69 +1418,109 @@ func handleModels(w http.ResponseWriter, r *http.Request) {
267
  }
268
 
269
  func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
 
 
 
 
270
  setCORSHeaders(w)
271
  if r.Method == "OPTIONS" {
272
  w.WriteHeader(http.StatusOK)
273
  return
274
  }
275
-
276
  debugLog("收到chat completions请求")
277
 
278
- // // 验证API Key
279
- // authHeader := r.Header.Get("Authorization")
280
- // if !strings.HasPrefix(authHeader, "Bearer ") {
281
- // debugLog("缺少或无效的Authorization头")
282
- // http.Error(w, "Missing or invalid Authorization header", http.StatusUnauthorized)
283
- // return
284
- // }
285
- // // 去除key验证
286
- // apiKey := strings.TrimPrefix(authHeader, "Bearer ")
287
- // if apiKey != DEFAULT_KEY {
288
- // debugLog("无效的API key: %s", apiKey)
289
- // http.Error(w, "Invalid API key", http.StatusUnauthorized)
290
- // return
291
- // }
292
-
293
- // debugLog("API key验证通过")
294
-
 
 
 
 
 
 
295
  // 读取请求体
296
  body, err := io.ReadAll(r.Body)
297
  if err != nil {
298
  debugLog("读取请求体失败: %v", err)
299
  http.Error(w, "Failed to read request body", http.StatusBadRequest)
 
 
 
 
300
  return
301
  }
302
-
303
  // 解析请求
304
  var req OpenAIRequest
305
  if err := json.Unmarshal(body, &req); err != nil {
306
  debugLog("JSON解析失败: %v", err)
307
  http.Error(w, "Invalid JSON", http.StatusBadRequest)
 
 
 
 
308
  return
309
  }
310
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
  // 如果客户端没有明确指定stream参数,使用默认值
312
  if !bytes.Contains(body, []byte(`"stream"`)) {
313
  req.Stream = DEFAULT_STREAM
314
  debugLog("客户端未指定stream参数,使用默认值: %v", DEFAULT_STREAM)
315
  }
316
-
317
- debugLog("请求解析成功 - 模型: %s, 流式: %v, 消息数: %d", req.Model, req.Stream, len(req.Messages))
318
 
319
  // 生成会话相关ID
320
  chatID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), time.Now().Unix())
321
  msgID := fmt.Sprintf("%d", time.Now().UnixNano())
322
 
323
- // 构造上游请求
 
 
 
 
 
 
 
 
 
324
  upstreamReq := UpstreamRequest{
325
  Stream: true, // 总是使用流式从上游获取
326
  ChatID: chatID,
327
  ID: msgID,
328
- Model: "0727-360B-API", // 上游实际模型ID
329
  Messages: req.Messages,
330
  Params: map[string]interface{}{},
331
  Features: map[string]interface{}{
332
- "enable_thinking": true,
333
  },
334
  BackgroundTasks: map[string]bool{
335
  "title_generation": false,
@@ -340,7 +1531,7 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
340
  ID string `json:"id"`
341
  Name string `json:"name"`
342
  OwnedBy string `json:"owned_by"`
343
- }{ID: "0727-360B-API", Name: "GLM-4.5", OwnedBy: "openai"},
344
  ToolServers: []string{},
345
  Variables: map[string]string{
346
  "{{USER_NAME}}": "User",
@@ -350,7 +1541,7 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
350
  }
351
 
352
  // 选择本次对话使用的token
353
- authToken := UPSTREAM_TOKEN
354
  if ANON_TOKEN_ENABLED {
355
  if t, err := getAnonymousToken(); err == nil {
356
  authToken = t
@@ -365,11 +1556,11 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
365
  }
366
  }
367
 
368
- // 调用上游API
369
  if req.Stream {
370
- handleStreamResponseWithIDs(w, upstreamReq, chatID, authToken)
371
  } else {
372
- handleNonStreamResponseWithIDs(w, upstreamReq, chatID, authToken)
373
  }
374
  }
375
 
@@ -379,16 +1570,13 @@ func callUpstreamWithHeaders(upstreamReq UpstreamRequest, refererChatID string,
379
  debugLog("上游请求序列化失败: %v", err)
380
  return nil, err
381
  }
382
-
383
  debugLog("调用上游API: %s", UPSTREAM_URL)
384
  debugLog("上游请求体: %s", string(reqBody))
385
-
386
  req, err := http.NewRequest("POST", UPSTREAM_URL, bytes.NewBuffer(reqBody))
387
  if err != nil {
388
  debugLog("创建HTTP请求失败: %v", err)
389
  return nil, err
390
  }
391
-
392
  req.Header.Set("Content-Type", "application/json")
393
  req.Header.Set("Accept", "application/json, text/event-stream")
394
  req.Header.Set("User-Agent", BROWSER_UA)
@@ -400,29 +1588,30 @@ func callUpstreamWithHeaders(upstreamReq UpstreamRequest, refererChatID string,
400
  req.Header.Set("X-FE-Version", X_FE_VERSION)
401
  req.Header.Set("Origin", ORIGIN_BASE)
402
  req.Header.Set("Referer", ORIGIN_BASE+"/c/"+refererChatID)
403
-
404
  client := &http.Client{Timeout: 60 * time.Second}
405
  resp, err := client.Do(req)
406
  if err != nil {
407
  debugLog("上游请求失败: %v", err)
408
  return nil, err
409
  }
410
-
411
  debugLog("上游响应状态: %d %s", resp.StatusCode, resp.Status)
412
  return resp, nil
413
  }
414
 
415
- func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequest, chatID string, authToken string) {
416
- debugLog("开始处理流式响应 (chat_id=%s)", chatID)
417
-
418
  resp, err := callUpstreamWithHeaders(upstreamReq, chatID, authToken)
419
  if err != nil {
420
  debugLog("调用上游失败: %v", err)
421
  http.Error(w, "Failed to call upstream", http.StatusBadGateway)
 
 
 
 
422
  return
423
  }
424
  defer resp.Body.Close()
425
-
426
  if resp.StatusCode != http.StatusOK {
427
  debugLog("上游返回错误状态: %d", resp.StatusCode)
428
  // 读取错误响应体
@@ -431,6 +1620,10 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
431
  debugLog("上游错误响应: %s", string(body))
432
  }
433
  http.Error(w, "Upstream error", http.StatusBadGateway)
 
 
 
 
434
  return
435
  }
436
 
@@ -453,7 +1646,7 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
453
  }
454
  // 处理每行前缀 "> "(包括起始位置)
455
  s = strings.TrimPrefix(s, "> ")
456
- s = strings.ReplaceAll(s, "\n> ", "\n")
457
  return strings.TrimSpace(s)
458
  }
459
 
@@ -461,19 +1654,18 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
461
  w.Header().Set("Content-Type", "text/event-stream")
462
  w.Header().Set("Cache-Control", "no-cache")
463
  w.Header().Set("Connection", "keep-alive")
464
-
465
  flusher, ok := w.(http.Flusher)
466
  if !ok {
467
  http.Error(w, "Streaming unsupported", http.StatusInternalServerError)
468
  return
469
  }
470
 
471
- // 发送第一个chunk(role
472
  firstChunk := OpenAIResponse{
473
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
474
  Object: "chat.completion.chunk",
475
  Created: time.Now().Unix(),
476
- Model: MODEL_NAME,
477
  Choices: []Choice{
478
  {
479
  Index: 0,
@@ -488,22 +1680,17 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
488
  debugLog("开始读取上游SSE流")
489
  scanner := bufio.NewScanner(resp.Body)
490
  lineCount := 0
491
-
492
  for scanner.Scan() {
493
  line := scanner.Text()
494
  lineCount++
495
-
496
  if !strings.HasPrefix(line, "data: ") {
497
  continue
498
  }
499
-
500
  dataStr := strings.TrimPrefix(line, "data: ")
501
  if dataStr == "" {
502
  continue
503
  }
504
-
505
  debugLog("收到SSE数据 (第%d行): %s", lineCount, dataStr)
506
-
507
  var upstreamData UpstreamData
508
  if err := json.Unmarshal([]byte(dataStr), &upstreamData); err != nil {
509
  debugLog("SSE数据解析失败: %v", err)
@@ -525,11 +1712,11 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
525
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
526
  Object: "chat.completion.chunk",
527
  Created: time.Now().Unix(),
528
- Model: MODEL_NAME,
529
  Choices: []Choice{{Index: 0, Delta: Delta{}, FinishReason: "stop"}},
530
  }
531
  writeSSEChunk(w, endChunk)
532
- fmt.Fprintf(w, "data: [DONE]\n\n")
533
  flusher.Flush()
534
  break
535
  }
@@ -549,7 +1736,7 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
549
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
550
  Object: "chat.completion.chunk",
551
  Created: time.Now().Unix(),
552
- Model: MODEL_NAME,
553
  Choices: []Choice{
554
  {
555
  Index: 0,
@@ -570,7 +1757,7 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
570
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
571
  Object: "chat.completion.chunk",
572
  Created: time.Now().Unix(),
573
- Model: MODEL_NAME,
574
  Choices: []Choice{
575
  {
576
  Index: 0,
@@ -581,36 +1768,42 @@ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequ
581
  }
582
  writeSSEChunk(w, endChunk)
583
  flusher.Flush()
584
-
585
  // 发送[DONE]
586
- fmt.Fprintf(w, "data: [DONE]\n\n")
587
  flusher.Flush()
588
  debugLog("流式响应完成,共处理%d行", lineCount)
589
  break
590
  }
591
  }
592
-
593
  if err := scanner.Err(); err != nil {
594
  debugLog("扫描器错误: %v", err)
595
  }
 
 
 
 
 
596
  }
597
 
598
  func writeSSEChunk(w http.ResponseWriter, chunk OpenAIResponse) {
599
  data, _ := json.Marshal(chunk)
600
- fmt.Fprintf(w, "data: %s\n\n", data)
601
  }
602
 
603
- func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequest, chatID string, authToken string) {
604
- debugLog("开始处理非流式响应 (chat_id=%s)", chatID)
605
-
606
  resp, err := callUpstreamWithHeaders(upstreamReq, chatID, authToken)
607
  if err != nil {
608
  debugLog("调用上游失败: %v", err)
609
  http.Error(w, "Failed to call upstream", http.StatusBadGateway)
 
 
 
 
610
  return
611
  }
612
  defer resp.Body.Close()
613
-
614
  if resp.StatusCode != http.StatusOK {
615
  debugLog("上游返回错误状态: %d", resp.StatusCode)
616
  // 读取错误响应体
@@ -619,6 +1812,10 @@ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamR
619
  debugLog("上游错误响应: %s", string(body))
620
  }
621
  http.Error(w, "Upstream error", http.StatusBadGateway)
 
 
 
 
622
  return
623
  }
624
 
@@ -626,23 +1823,19 @@ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamR
626
  var fullContent strings.Builder
627
  scanner := bufio.NewScanner(resp.Body)
628
  debugLog("开始收集完整响应内容")
629
-
630
  for scanner.Scan() {
631
  line := scanner.Text()
632
  if !strings.HasPrefix(line, "data: ") {
633
  continue
634
  }
635
-
636
  dataStr := strings.TrimPrefix(line, "data: ")
637
  if dataStr == "" {
638
  continue
639
  }
640
-
641
  var upstreamData UpstreamData
642
  if err := json.Unmarshal([]byte(dataStr), &upstreamData); err != nil {
643
  continue
644
  }
645
-
646
  if upstreamData.Data.DeltaContent != "" {
647
  out := upstreamData.Data.DeltaContent
648
  if upstreamData.Data.Phase == "thinking" {
@@ -662,7 +1855,7 @@ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamR
662
  s = strings.ReplaceAll(s, "</details>", "")
663
  }
664
  s = strings.TrimPrefix(s, "> ")
665
- s = strings.ReplaceAll(s, "\n> ", "\n")
666
  return strings.TrimSpace(s)
667
  }(out)
668
  }
@@ -670,22 +1863,20 @@ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamR
670
  fullContent.WriteString(out)
671
  }
672
  }
673
-
674
  if upstreamData.Data.Done || upstreamData.Data.Phase == "done" {
675
  debugLog("检测到完成信号,停止收集")
676
  break
677
  }
678
  }
679
-
680
  finalContent := fullContent.String()
681
  debugLog("内容收集完成,最终长度: %d", len(finalContent))
682
 
683
- // 构造完整响应
684
  response := OpenAIResponse{
685
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
686
  Object: "chat.completion",
687
  Created: time.Now().Unix(),
688
- Model: MODEL_NAME,
689
  Choices: []Choice{
690
  {
691
  Index: 0,
@@ -702,8 +1893,12 @@ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamR
702
  TotalTokens: 0,
703
  },
704
  }
705
-
706
  w.Header().Set("Content-Type", "application/json")
707
  json.NewEncoder(w).Encode(response)
708
  debugLog("非流式响应发送完成")
 
 
 
 
 
709
  }
 
11
  "os"
12
  "regexp"
13
  "strings"
14
+ "sync"
15
  "time"
16
  )
17
 
18
  // 配置变量(从环境变量读取)
19
  var (
20
+ UPSTREAM_URL string
21
+ DEFAULT_KEY string
22
+ ZAI_TOKEN string
23
+ MODEL_NAME string // 未使用,因为现在动态获取
24
+ PORT string
25
+ DEBUG_MODE bool
26
+ DEFAULT_STREAM bool
27
+ DASHBOARD_ENABLED bool
28
+ ENABLE_THINKING bool
29
+ MODELS_URL string // 新增:模型列表URL
30
+ DEFAULT_UPSTREAM_MODEL_ID string // 新增:默认上游模型ID
31
  )
32
 
33
// RequestStats holds aggregate request counters for the dashboard.
// All fields are written by recordRequestStats under statsMutex;
// do not read or modify them without holding that lock.
type RequestStats struct {
	TotalRequests       int64         // every request recorded
	SuccessfulRequests  int64         // responses with a 2xx status
	FailedRequests      int64         // responses with any non-2xx status
	LastRequestTime     time.Time     // wall-clock time of the most recent request
	AverageResponseTime time.Duration // running mean over all recorded requests
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
// LiveRequest is a single entry in the dashboard's recent-request feed;
// it is serialized to JSON for the SSE endpoint (see getLiveRequestsData).
type LiveRequest struct {
	ID        string    `json:"id"`         // unique per entry (nanosecond timestamp)
	Timestamp time.Time `json:"timestamp"`  // when the request was recorded
	Method    string    `json:"method"`     // HTTP method
	Path      string    `json:"path"`       // request path
	Status    int       `json:"status"`     // HTTP status code returned to the client
	Duration  int64     `json:"duration"`   // handling time in milliseconds
	UserAgent string    `json:"user_agent"` // client User-Agent header
}
52
 
53
// UpstreamModelsResponse mirrors the JSON body returned by the upstream
// model-list endpoint (MODELS_URL).
type UpstreamModelsResponse struct {
	Object string          `json:"object"`
	Data   []UpstreamModel `json:"data"`
}

// UpstreamModel is one model entry from the upstream list. getModels only
// exposes entries whose Info.IsActive flag is set.
type UpstreamModel struct {
	ID      string `json:"id"`
	Name    string `json:"name"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	OwnedBy string `json:"owned_by"`
	Info    struct {
		IsActive  bool  `json:"is_active"`
		CreatedAt int64 `json:"created_at"` // used as the model's Created timestamp downstream
	} `json:"info"`
}
70
 
71
// OpenAIRequest is the request body accepted on the OpenAI-compatible
// chat-completions endpoints.
type OpenAIRequest struct {
	Model          string    `json:"model"`
	Messages       []Message `json:"messages"`
	Stream         bool      `json:"stream,omitempty"`      // when the client omits it, DEFAULT_STREAM is applied
	Temperature    float64   `json:"temperature,omitempty"`
	MaxTokens      int       `json:"max_tokens,omitempty"`
	EnableThinking *bool     `json:"enable_thinking,omitempty"` // pointer distinguishes "unset" from explicit false
}
80
 
81
  type Message struct {
 
159
  }
160
 
161
  type Model struct {
162
+ ID string `json:"id"` // 保持ID字段
163
  Object string `json:"object"`
164
+ Name string `json:"name"` // 新增Name字段,用于显示
165
  Created int64 `json:"created"`
166
  OwnedBy string `json:"owned_by"`
167
  }
168
 
169
+ // 全局变量
170
+ var (
171
+ stats RequestStats
172
+ liveRequests = []LiveRequest{} // 初始化为空数组,而不是 nil
173
+ statsMutex sync.Mutex
174
+ requestsMutex sync.Mutex
175
+ modelsCache []Model // 新增:缓存模型列表
176
+ modelsMutex sync.RWMutex // 新增:保护模型缓存的读写锁
177
+ )
178
+
179
+ // 思考内容处理策略
180
+ const (
181
+ THINK_TAGS_MODE = "strip" // strip: 去除<details>标签;think: 转为<think>标签;raw: 保留原样
182
+ )
183
+
184
+ // 伪装前端头部(来自抓包)
185
+ const (
186
+ X_FE_VERSION = "prod-fe-1.0.70"
187
+ BROWSER_UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0"
188
+ SEC_CH_UA = "\"Not;A=Brand\";v=\"99\", \"Microsoft Edge\";v=\"139\", \"Chromium\";v=\"139\""
189
+ SEC_CH_UA_MOB = "?0"
190
+ SEC_CH_UA_PLAT = "\"Windows\""
191
+ ORIGIN_BASE = "https://chat.z.ai"
192
+ )
193
+
194
+ // 匿名token开关
195
+ const ANON_TOKEN_ENABLED = true
196
+
197
+ // 从环境变量初始化配置
198
+ func initConfig() {
199
+ UPSTREAM_URL = getEnv("UPSTREAM_URL", "https://chat.z.ai/api/chat/completions")
200
+ DEFAULT_KEY = getEnv("DEFAULT_KEY", "sk-your-key")
201
+ ZAI_TOKEN = getEnv("ZAI_TOKEN", "")
202
+ MODEL_NAME = getEnv("MODEL_NAME", "GLM-4.5") // 未使用,但保留
203
+ PORT = getEnv("PORT", "7860")
204
+ MODELS_URL = getEnv("MODELS_URL", "https://chat.z.ai/api/models") // 新增
205
+ DEFAULT_UPSTREAM_MODEL_ID = getEnv("DEFAULT_UPSTREAM_MODEL_ID", "0727-360B-API") // 新增
206
+ // 处理PORT格式,确保有冒号前缀
207
+ if !strings.HasPrefix(PORT, ":") {
208
+ PORT = ":" + PORT
209
+ }
210
+ DEBUG_MODE = getEnv("DEBUG_MODE", "true") == "true"
211
+ DEFAULT_STREAM = getEnv("DEFAULT_STREAM", "true") == "true"
212
+ DASHBOARD_ENABLED = getEnv("DASHBOARD_ENABLED", "true") == "true"
213
+ ENABLE_THINKING = getEnv("ENABLE_THINKING", "true") == "true"
214
+ }
215
+
216
+ // 记录请求统计信息
217
+ func recordRequestStats(startTime time.Time, path string, status int) {
218
+ duration := time.Since(startTime)
219
+ statsMutex.Lock()
220
+ defer statsMutex.Unlock()
221
+ stats.TotalRequests++
222
+ stats.LastRequestTime = time.Now()
223
+ if status >= 200 && status < 300 {
224
+ stats.SuccessfulRequests++
225
+ } else {
226
+ stats.FailedRequests++
227
+ }
228
+ // 更新平均响应时间
229
+ if stats.TotalRequests > 0 {
230
+ totalDuration := stats.AverageResponseTime*time.Duration(stats.TotalRequests-1) + duration
231
+ stats.AverageResponseTime = totalDuration / time.Duration(stats.TotalRequests)
232
+ } else {
233
+ stats.AverageResponseTime = duration
234
+ }
235
+ }
236
+
237
+ // 添加实时请求信息
238
+ func addLiveRequest(method, path string, status int, duration time.Duration, _, userAgent string) {
239
+ requestsMutex.Lock()
240
+ defer requestsMutex.Unlock()
241
+ request := LiveRequest{
242
+ ID: fmt.Sprintf("%d", time.Now().UnixNano()),
243
+ Timestamp: time.Now(),
244
+ Method: method,
245
+ Path: path,
246
+ Status: status,
247
+ Duration: duration.Milliseconds(),
248
+ UserAgent: userAgent,
249
+ }
250
+ liveRequests = append(liveRequests, request)
251
+ // 只保留最近的100条请求
252
+ if len(liveRequests) > 100 {
253
+ liveRequests = liveRequests[1:]
254
+ }
255
+ }
256
+
257
+ // 获取实时请求数据(用于SSE)
258
+ func getLiveRequestsData() []byte {
259
+ requestsMutex.Lock()
260
+ defer requestsMutex.Unlock()
261
+ // 确保 liveRequests 不为 nil
262
+ if liveRequests == nil {
263
+ liveRequests = []LiveRequest{}
264
+ }
265
+ data, err := json.Marshal(liveRequests)
266
+ if err != nil {
267
+ // 如果序列化失败,返回空数组
268
+ emptyArray := []LiveRequest{}
269
+ data, _ = json.Marshal(emptyArray)
270
+ }
271
+ return data
272
+ }
273
+
274
+ // 获取统计数据(用于SSE)
275
+ func getStatsData() []byte {
276
+ statsMutex.Lock()
277
+ defer statsMutex.Unlock()
278
+ data, _ := json.Marshal(stats)
279
+ return data
280
+ }
281
+
282
+ // 获取环境变量,如果不存在则返回默认值
283
+ func getEnv(key, defaultValue string) string {
284
+ if value := os.Getenv(key); value != "" {
285
+ return value
286
+ }
287
+ return defaultValue
288
+ }
289
+
290
+ // 获取客户端IP地址
291
+ func getClientIP(r *http.Request) string {
292
+ // 检查X-Forwarded-For头
293
+ if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
294
+ ips := strings.Split(xff, ",")
295
+ if len(ips) > 0 {
296
+ return strings.TrimSpace(ips[0])
297
+ }
298
+ }
299
+ // 检查X-Real-IP头
300
+ if xri := r.Header.Get("X-Real-IP"); xri != "" {
301
+ return xri
302
+ }
303
+ // 使用RemoteAddr
304
+ ip := r.RemoteAddr
305
+ // 移除端口号
306
+ if strings.Contains(ip, ":") {
307
+ ip = strings.Split(ip, ":")[0]
308
+ }
309
+ return ip
310
+ }
311
+
312
// isEnglishLetter reports whether r is an ASCII letter (A-Z or a-z).
func isEnglishLetter(r rune) bool {
	switch {
	case 'A' <= r && r <= 'Z':
		return true
	case 'a' <= r && r <= 'z':
		return true
	default:
		return false
	}
}

// hasEnglishLetter reports whether s contains at least one ASCII letter.
func hasEnglishLetter(s string) bool {
	return strings.IndexFunc(s, isEnglishLetter) != -1
}
326
+
327
// isDigit reports whether s is non-empty and consists solely of the ASCII
// digits '0'-'9'. (Byte-wise scanning is equivalent to rune-wise here:
// every byte of a multi-byte rune is >= 0x80 and therefore fails the check.)
func isDigit(s string) bool {
	if s == "" {
		return false
	}
	for i := 0; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			return false
		}
	}
	return true
}
336
+
337
+ // 格式化模型名
338
+ func formatModelName(name string) string {
339
+ if name == "" {
340
+ return ""
341
+ }
342
+ parts := strings.Split(name, "-")
343
+ if len(parts) == 1 {
344
+ return strings.ToUpper(parts[0])
345
+ }
346
+ formatted := []string{strings.ToUpper(parts[0])}
347
+ for _, p := range parts[1:] {
348
+ if p == "" {
349
+ formatted = append(formatted, "")
350
+ } else if isDigit(p) {
351
+ formatted = append(formatted, p)
352
+ } else if hasEnglishLetter(p) {
353
+ // Use Title for better capitalization of letters
354
+ formatted = append(formatted, strings.Title(p))
355
+ } else {
356
+ formatted = append(formatted, p)
357
+ }
358
+ }
359
+ return strings.Join(formatted, "-")
360
+ }
361
+
362
// getModels returns the list of models exposed to clients. It serves a
// cached copy when available; otherwise it fetches the upstream model list
// from MODELS_URL (preferring an anonymous token when ANON_TOKEN_ENABLED),
// keeps only active entries, caches the result, and falls back to
// getDefaultModels() on any failure or when no active model is found.
// NOTE(review): the cache is never invalidated, and concurrent cache
// misses may each issue an upstream fetch — confirm both are acceptable.
func getModels() []Model {
	modelsMutex.RLock()
	cachedModels := modelsCache
	modelsMutex.RUnlock()

	if cachedModels != nil {
		return cachedModels
	}

	// Pick the auth token: anonymous token when enabled (best effort),
	// otherwise the configured ZAI_TOKEN (which may be empty).
	token := ZAI_TOKEN
	if ANON_TOKEN_ENABLED {
		if t, err := getAnonymousToken(); err == nil {
			token = t
		}
	}

	client := &http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:       10,
			IdleConnTimeout:    30 * time.Second,
			DisableCompression: false,
		},
	}

	req, err := http.NewRequest("GET", MODELS_URL, nil)
	if err != nil {
		debugLog("创建模型请求失败: %v", err)
		return getDefaultModels()
	}

	// Browser-like headers matching the rest of the proxy's disguise.
	req.Header.Set("User-Agent", BROWSER_UA)
	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9")
	req.Header.Set("X-FE-Version", X_FE_VERSION)
	req.Header.Set("sec-ch-ua", SEC_CH_UA)
	req.Header.Set("sec-ch-ua-mobile", SEC_CH_UA_MOB)
	req.Header.Set("sec-ch-ua-platform", SEC_CH_UA_PLAT)
	req.Header.Set("Origin", ORIGIN_BASE)
	if token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}

	resp, err := client.Do(req)
	if err != nil {
		debugLog("获取模型列表失败: %v", err)
		return getDefaultModels()
	}
	defer func() {
		if closeErr := resp.Body.Close(); closeErr != nil {
			debugLog("关闭模型响应体失败: %v", closeErr)
		}
	}()

	if resp.StatusCode != http.StatusOK {
		debugLog("模型列表响应状态异常: %d", resp.StatusCode)
		return getDefaultModels()
	}

	var upstreamResp UpstreamModelsResponse
	if err := json.NewDecoder(resp.Body).Decode(&upstreamResp); err != nil {
		debugLog("解析模型列表失败: %v", err)
		return getDefaultModels()
	}

	// Keep only active models; derive a display name when the upstream
	// Name is empty or does not start with an ASCII letter.
	var models []Model
	for _, m := range upstreamResp.Data {
		if !m.Info.IsActive {
			continue
		}

		modelName := m.Name
		if modelName == "" || !isEnglishLetter([]rune(modelName)[0]) {
			modelName = formatModelName(m.ID)
		}

		models = append(models, Model{
			ID:      m.ID,
			Object:  "model",
			Name:    modelName,
			Created: m.Info.CreatedAt,
			OwnedBy: "z.ai",
		})
	}

	if len(models) == 0 {
		return getDefaultModels()
	}

	// Cache the successful result for subsequent calls.
	modelsMutex.Lock()
	modelsCache = models
	modelsMutex.Unlock()

	debugLog("获取到%d个模型", len(models))
	return models
}
462
+
463
+ // 获取默认模型列表(获取失败时使用)
464
+ func getDefaultModels() []Model {
465
+ return []Model{
466
+ {
467
+ ID: "0727-360B-API", // 与DEFAULT_UPSTREAM_MODEL_ID一致
468
+ Object: "model",
469
+ Name: "GLM-4.5", // 或根据ID格式化
470
+ Created: time.Now().Unix(),
471
+ OwnedBy: "z.ai",
472
+ },
473
+ }
474
+ }
475
+
476
  // debug日志函数
477
  func debugLog(format string, args ...interface{}) {
478
  if DEBUG_MODE {
 
497
  req.Header.Set("sec-ch-ua-platform", SEC_CH_UA_PLAT)
498
  req.Header.Set("Origin", ORIGIN_BASE)
499
  req.Header.Set("Referer", ORIGIN_BASE+"/")
 
500
  resp, err := client.Do(req)
501
  if err != nil {
502
  return "", err
 
520
func main() {
	// Load configuration from environment variables before serving.
	initConfig()

	// OpenAI-compatible routes, registered under several prefixes
	// ("/v1", "/api/v1", "/hf/v1") so different clients can connect.
	http.HandleFunc("/v1/models", handleModels)
	http.HandleFunc("/v1/chat/completions", handleChatCompletions)
	http.HandleFunc("/docs", handleAPIDocs)
	// http.HandleFunc("/", handleOptions)
	// NOTE(review): "/" serves the dashboard unconditionally, even when
	// DASHBOARD_ENABLED is false — confirm this is intended.
	http.HandleFunc("/", handleDashboard)
	http.HandleFunc("/api/v1/models", handleModels)
	http.HandleFunc("/api/v1/chat/completions", handleChatCompletions)
	http.HandleFunc("/hf/v1/models", handleModels)
	http.HandleFunc("/hf/v1/chat/completions", handleChatCompletions)

	// Dashboard page plus its SSE data endpoints, gated by config.
	if DASHBOARD_ENABLED {
		http.HandleFunc("/dashboard", handleDashboard)
		http.HandleFunc("/dashboard/stats", handleDashboardStats)
		http.HandleFunc("/dashboard/requests", handleDashboardRequests)
		log.Printf("Dashboard已启用,访问地址: http://localhost%s/dashboard", PORT)
	}

	log.Printf("OpenAI兼容API服务器启动在端口%s", PORT)
	log.Printf("模型: %s", MODEL_NAME) // informational only; models are fetched dynamically
	log.Printf("上游: %s", UPSTREAM_URL)
	log.Printf("Debug模式: %v", DEBUG_MODE)
	log.Printf("默认流式响应: %v", DEFAULT_STREAM)
	log.Printf("Dashboard启用: %v", DASHBOARD_ENABLED)
	log.Printf("思考功能: %v", ENABLE_THINKING)
	// Blocks until the server exits; any listen error is fatal.
	log.Fatal(http.ListenAndServe(PORT, nil))
}
549
 
550
// handleDashboard serves the self-contained HTML dashboard page: stats cards,
// a Chart.js response-time chart, and a paginated live-request table.
// The embedded JavaScript polls /dashboard/stats and /dashboard/requests
// every 5 seconds; no external template files are required.
//
// NOTE(review): the table and chart divide request.duration by 1000 while the
// stats card divides AverageResponseTime by 1e9 — confirm which unit
// addLiveRequest stores (time.Duration nanoseconds vs milliseconds).
func handleDashboard(w http.ResponseWriter, r *http.Request) {
	// Read-only page: only GET is allowed.
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Single inline HTML template. The model column now reads request.model
	// (models are dynamic) and only falls back to "GLM-4.5" for old records.
	tmpl := `<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>API调用看板</title>
    <style>
        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            margin: 0;
            padding: 20px;
            background-color: #f5f5f5;
        }
        .container {
            max-width: 1200px;
            margin: 0 auto;
            background-color: white;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
            padding: 20px;
        }
        h1 {
            color: #333;
            text-align: center;
            margin-bottom: 30px;
        }
        .stats-container {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px;
            margin-bottom: 30px;
        }
        .stat-card {
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            text-align: center;
            box-shadow: 0 1px 3px rgba(0,0,0,0.1);
        }
        .stat-value {
            font-size: 24px;
            font-weight: bold;
            color: #007bff;
        }
        .stat-label {
            font-size: 14px;
            color: #6c757d;
            margin-top: 5px;
        }
        .requests-container {
            margin-top: 30px;
        }
        .requests-table {
            width: 100%;
            border-collapse: collapse;
        }
        .requests-table th, .requests-table td {
            padding: 10px;
            text-align: left;
            border-bottom: 1px solid #ddd;
        }
        .requests-table th {
            background-color: #f8f9fa;
        }
        .status-success {
            color: #28a745;
        }
        .status-error {
            color: #dc3545;
        }
        .refresh-info {
            text-align: center;
            margin-top: 20px;
            color: #007bff;
            font-size: 14px;
        }
        .pagination-container {
            display: flex;
            justify-content: center;
            align-items: center;
            margin-top: 20px;
            gap: 10px;
        }
        .pagination-container button {
            padding: 5px 10px;
            background-color: #007bff;
            color: white;
            border: none;
            border-radius: 4px;
            cursor: pointer;
        }
        .pagination-container button:disabled {
            background-color: #cccccc;
            cursor: not-allowed;
        }
        .pagination-container button:hover:not(:disabled) {
            background-color: #0056b3;
        }
        .chart-container {
            margin-top: 30px;
            height: 300px;
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            box-shadow: 0 1px 3px rgba(0,0,0,0.1);
        }
    </style>
</head>
<body>
    <div class="container">
        <h1>API调用看板</h1>
        <div class="stats-container">
            <div class="stat-card">
                <div class="stat-value" id="total-requests">0</div>
                <div class="stat-label">总请求数</div>
            </div>
            <div class="stat-card">
                <div class="stat-value" id="successful-requests">0</div>
                <div class="stat-label">成功请求</div>
            </div>
            <div class="stat-card">
                <div class="stat-value" id="failed-requests">0</div>
                <div class="stat-label">失败请求</div>
            </div>
            <div class="stat-card">
                <div class="stat-value" id="avg-response-time">0s</div>
                <div class="stat-label">平均响应时间</div>
            </div>
        </div>
        <div class="chart-container">
            <h2>请求统计图表</h2>
            <canvas id="requestsChart"></canvas>
        </div>
        <div class="requests-container">
            <h2>实时请求</h2>
            <table class="requests-table">
                <thead>
                    <tr>
                        <th>时间</th>
                        <th>模型</th>
                        <th>方法</th>
                        <th>状态</th>
                        <th>耗时</th>
                        <th>User Agent</th>
                    </tr>
                </thead>
                <tbody id="requests-tbody">
                    <!-- 请求记录将通过JavaScript动态添加 -->
                </tbody>
            </table>
            <div class="pagination-container">
                <button id="prev-page" disabled>上一页</button>
                <span id="page-info">第 1 页,共 1 页</span>
                <button id="next-page" disabled>下一页</button>
            </div>
        </div>
        <div class="refresh-info">
            数据每5秒自动刷新一次
        </div>
    </div>
    <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
    <script>
        // 全局变量
        let allRequests = [];
        let currentPage = 1;
        const itemsPerPage = 10;
        let requestsChart = null;
        // 更新统计数据
        function updateStats() {
            fetch('/dashboard/stats')
                .then(response => response.json())
                .then(data => {
                    document.getElementById('total-requests').textContent = data.TotalRequests;
                    document.getElementById('successful-requests').textContent = data.SuccessfulRequests;
                    document.getElementById('failed-requests').textContent = data.FailedRequests;
                    document.getElementById('avg-response-time').textContent = (data.AverageResponseTime / 1000000000).toFixed(2) + 's';
                })
                .catch(error => console.error('Error fetching stats:', error));
        }
        // 更新请求列表
        function updateRequests() {
            fetch('/dashboard/requests')
                .then(response => response.json())
                .then(data => {
                    // 检查数据是否为数组
                    if (!Array.isArray(data)) {
                        console.error('返回的数据不是数组:', data);
                        return;
                    }
                    // 保存所有请求数据
                    allRequests = data;
                    // 按时间倒序排列
                    allRequests.sort((a, b) => {
                        const timeA = new Date(a.timestamp);
                        const timeB = new Date(b.timestamp);
                        return timeB - timeA;
                    });
                    // 更新表格
                    updateTable();
                    // 更新图表
                    updateChart();
                    // 更新分页信息
                    updatePagination();
                })
                .catch(error => console.error('Error fetching requests:', error));
        }
        // 更新表格显示
        function updateTable() {
            const tbody = document.getElementById('requests-tbody');
            tbody.innerHTML = '';
            // 计算当前页的数据范围
            const startIndex = (currentPage - 1) * itemsPerPage;
            const endIndex = startIndex + itemsPerPage;
            const currentRequests = allRequests.slice(startIndex, endIndex);
            currentRequests.forEach(request => {
                const row = document.createElement('tr');
                // 格式化时间 - 检查时间戳是否有效
                let timeStr = "Invalid Date";
                if (request.timestamp) {
                    try {
                        const time = new Date(request.timestamp);
                        if (!isNaN(time.getTime())) {
                            timeStr = time.toLocaleTimeString();
                        }
                    } catch (e) {
                        console.error("时间格式化错误:", e);
                    }
                }
                // 状态样式
                const statusClass = request.status >= 200 && request.status < 300 ? 'status-success' : 'status-error';
                // 截断 User Agent,避免过长
                let userAgent = request.user_agent || "undefined";
                if (userAgent.length > 30) {
                    userAgent = userAgent.substring(0, 30) + "...";
                }
                row.innerHTML = "<td>" + timeStr + "</td>" + "<td>" + (request.model || "GLM-4.5") + "</td>" + "<td>" + (request.method || "undefined") + "</td>" + "<td class=\"" + statusClass + "\">" + (request.status || "undefined") + "</td>" + "<td>" + ((request.duration / 1000).toFixed(2) || "undefined") + "s</td>" + "<td title=\"" + (request.user_agent || "") + "\">" + userAgent + "</td>";
                tbody.appendChild(row);
            });
        }
        // 更新分页信息
        function updatePagination() {
            const totalPages = Math.ceil(allRequests.length / itemsPerPage);
            document.getElementById('page-info').textContent = "第 " + currentPage + " 页,共 " + totalPages + " 页";
            document.getElementById('prev-page').disabled = currentPage <= 1;
            document.getElementById('next-page').disabled = currentPage >= totalPages;
        }
        // 更新图表
        function updateChart() {
            const ctx = document.getElementById('requestsChart').getContext('2d');
            // 准备图表数据 - 最近20条请求的响应时间
            const chartData = allRequests.slice(0, 20).reverse();
            const labels = chartData.map(req => {
                const time = new Date(req.timestamp);
                return time.toLocaleTimeString();
            });
            const responseTimes = chartData.map(req => req.duration);
            // 如果图表已存在,先销毁
            if (requestsChart) {
                requestsChart.destroy();
            }
            // 创建新图表
            requestsChart = new Chart(ctx, {
                type: 'line',
                data: {
                    labels: labels,
                    datasets: [{
                        label: '响应时间 (s)',
                        data: responseTimes.map(time => time / 1000),
                        borderColor: '#007bff',
                        backgroundColor: 'rgba(0, 123, 255, 0.1)',
                        tension: 0.1,
                        fill: true
                    }]
                },
                options: {
                    responsive: true,
                    maintainAspectRatio: false,
                    scales: {
                        y: {
                            beginAtZero: true,
                            title: {
                                display: true,
                                text: '响应时间 (s)'
                            }
                        },
                        x: {
                            title: {
                                display: true,
                                text: '时间'
                            }
                        }
                    },
                    plugins: {
                        title: {
                            display: true,
                            text: '最近20条请求的响应时间趋势 (s)'
                        }
                    }
                }
            });
        }
        // 分页按钮事件
        document.getElementById('prev-page').addEventListener('click', function() {
            if (currentPage > 1) {
                currentPage--;
                updateTable();
                updatePagination();
            }
        });
        document.getElementById('next-page').addEventListener('click', function() {
            const totalPages = Math.ceil(allRequests.length / itemsPerPage);
            if (currentPage < totalPages) {
                currentPage++;
                updateTable();
                updatePagination();
            }
        });
        // 初始加载
        updateStats();
        updateRequests();
        // 定时刷新
        setInterval(updateStats, 5000);
        setInterval(updateRequests, 5000);
    </script>
</body>
</html>`
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	fmt.Fprint(w, tmpl)
}
887
+
888
+ // Dashboard统计数据处理器
889
+ func handleDashboardStats(w http.ResponseWriter, r *http.Request) {
890
+ w.Header().Set("Content-Type", "application/json")
891
+ w.Write(getStatsData())
892
+ }
893
+
894
+ // Dashboard请求数据处理器
895
+ func handleDashboardRequests(w http.ResponseWriter, r *http.Request) {
896
+ w.Header().Set("Content-Type", "application/json")
897
+ w.Write(getLiveRequestsData())
898
+ }
899
+
900
// handleAPIDocs serves the static, self-contained HTML API documentation page
// (overview, authentication, endpoint reference, tabbed usage examples and
// error-handling notes). No external assets beyond the inline CSS/JS.
//
// Fix: the third cURL example ("启用思考功能的请求") previously sat outside the
// <div class="example"> block, so it rendered as unstyled body text; the
// closing </div> now follows it.
func handleAPIDocs(w http.ResponseWriter, r *http.Request) {
	// Read-only page: only GET is allowed.
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Inline HTML documentation template.
	tmpl := `<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>ZtoApi 文档</title>
    <style>
        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            margin: 0;
            padding: 20px;
            background-color: #f5f5f5;
            line-height: 1.6;
        }
        .container {
            max-width: 1200px;
            margin: 0 auto;
            background-color: white;
            border-radius: 8px;
            box-shadow: 0 2px 10px rgba(0,0,0,0.1);
            padding: 30px;
        }
        h1 {
            color: #333;
            text-align: center;
            margin-bottom: 30px;
            border-bottom: 2px solid #007bff;
            padding-bottom: 10px;
        }
        h2 {
            color: #007bff;
            margin-top: 30px;
            margin-bottom: 15px;
        }
        h3 {
            color: #333;
            margin-top: 25px;
            margin-bottom: 10px;
        }
        .endpoint {
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            margin-bottom: 20px;
            border-left: 4px solid #007bff;
        }
        .method {
            display: inline-block;
            padding: 4px 8px;
            border-radius: 4px;
            color: white;
            font-weight: bold;
            margin-right: 10px;
            font-size: 14px;
        }
        .get { background-color: #28a745; }
        .post { background-color: #007bff; }
        .path {
            font-family: monospace;
            background-color: #e9ecef;
            padding: 2px 6px;
            border-radius: 3px;
            font-size: 16px;
        }
        .description {
            margin: 15px 0;
        }
        .parameters {
            margin: 15px 0;
        }
        table {
            width: 100%;
            border-collapse: collapse;
            margin: 15px 0;
        }
        th, td {
            padding: 10px;
            text-align: left;
            border-bottom: 1px solid #ddd;
        }
        th {
            background-color: #f8f9fa;
            font-weight: bold;
        }
        .example {
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            margin: 15px 0;
            font-family: monospace;
            white-space: pre-wrap;
            overflow-x: auto;
        }
        .note {
            background-color: #fff3cd;
            border-left: 4px solid #ffc107;
            padding: 10px 15px;
            margin: 15px 0;
            border-radius: 0 4px 4px 0;
        }
        .response {
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            margin: 15px 0;
            font-family: monospace;
            white-space: pre-wrap;
            overflow-x: auto;
        }
        .tab {
            overflow: hidden;
            border: 1px solid #ccc;
            background-color: #f1f1f1;
            border-radius: 4px 4px 0 0;
        }
        .tab button {
            background-color: inherit;
            float: left;
            border: none;
            outline: none;
            cursor: pointer;
            padding: 14px 16px;
            transition: 0.3s;
            font-size: 16px;
        }
        .tab button:hover {
            background-color: #ddd;
        }
        .tab button.active {
            background-color: #ccc;
        }
        .tabcontent {
            display: none;
            padding: 6px 12px;
            border: 1px solid #ccc;
            border-top: none;
            border-radius: 0 0 4px 4px;
        }
        .toc {
            background-color: #f8f9fa;
            border-radius: 6px;
            padding: 15px;
            margin-bottom: 20px;
        }
        .toc ul {
            padding-left: 20px;
        }
        .toc li {
            margin: 5px 0;
        }
        .toc a {
            color: #007bff;
            text-decoration: none;
        }
        .toc a:hover {
            text-decoration: underline;
        }
    </style>
</head>
<body>
    <div class="container">
        <h1>ZtoApi 文档</h1>
        <div class="toc">
            <h2>目录</h2>
            <ul>
                <li><a href="#overview">概述</a></li>
                <li><a href="#authentication">身份验证</a></li>
                <li><a href="#endpoints">API端点</a>
                    <ul>
                        <li><a href="#models">获取模型列表</a></li>
                        <li><a href="#chat-completions">聊天完成</a></li>
                    </ul>
                </li>
                <li><a href="#examples">使用示例</a></li>
                <li><a href="#error-handling">错误处理</a></li>
            </ul>
        </div>
        <section id="overview">
            <h2>概述</h2>
            <p>这是一个为Z.ai GLM-4.5模型提供OpenAI兼容API接口的代理服务器。它允许你使用标准的OpenAI API格式与Z.ai的GLM-4.5模型进行交互,支持流式和非流式响应。</p>
            <p><strong>基础URL:</strong> <code>http://localhost:7860/v1</code></p>
            <div class="note">
                <strong>注意:</strong> 默认端口为7860,可以通过环境变量PORT进行修改。
            </div>
        </section>
        <section id="authentication">
            <h2>身份验证</h2>
            <p>所有API请求都需要在请求头中包含有效的API密钥进行身份验证:</p>
            <div class="example">
Authorization: Bearer your-api-key</div>
            <p>默认的API密钥为 <code>sk-your-key</code>,可以通过环境变量 <code>DEFAULT_KEY</code> 进行修改。</p>
        </section>
        <section id="endpoints">
            <h2>API端点</h2>
            <div class="endpoint" id="models">
                <h3>获取模型列表</h3>
                <div>
                    <span class="method get">GET</span>
                    <span class="path">/v1/models</span>
                </div>
                <div class="description">
                    <p>获取可用模型列表。</p>
                </div>
                <div class="parameters">
                    <h4>请求参数</h4>
                    <p>无</p>
                </div>
                <div class="response">
{
"object": "list",
"data": [
{
"id": "GLM-4.5",
"object": "model",
"created": 1756788845,
"owned_by": "z.ai"
}
]
}</div>
            </div>
            <div class="endpoint" id="chat-completions">
                <h3>聊天完成</h3>
                <div>
                    <span class="method post">POST</span>
                    <span class="path">/v1/chat/completions</span>
                </div>
                <div class="description">
                    <p>基于消息列表生成模型响应。支持流式和非流式两种模式。</p>
                </div>
                <div class="parameters">
                    <h4>请求参数</h4>
                    <table>
                        <thead>
                            <tr>
                                <th>参数名</th>
                                <th>类型</th>
                                <th>必需</th>
                                <th>说明</th>
                            </tr>
                        </thead>
                        <tbody>
                            <tr>
                                <td>model</td>
                                <td>string</td>
                                <td>是</td>
                                <td>要使用的模型ID,例如 "GLM-4.5"</td>
                            </tr>
                            <tr>
                                <td>messages</td>
                                <td>array</td>
                                <td>是</td>
                                <td>消息列表,包含角色和内容</td>
                            </tr>
                            <tr>
                                <td>stream</td>
                                <td>boolean</td>
                                <td>否</td>
                                <td>是否使用流式响应,默认为true</td>
                            </tr>
                            <tr>
                                <td>temperature</td>
                                <td>number</td>
                                <td>否</td>
                                <td>采样温度,控制随机性</td>
                            </tr>
                            <tr>
                                <td>max_tokens</td>
                                <td>integer</td>
                                <td>否</td>
                                <td>生成的最大令牌数</td>
                            </tr>
                            <tr>
                                <td>enable_thinking</td>
                                <td>boolean</td>
                                <td>否</td>
                                <td>是否启用思考功能,默认使用环境变量 ENABLE_THINKING 的值</td>
                            </tr>
                        </tbody>
                    </table>
                </div>
                <div class="parameters">
                    <h4>消息格式</h4>
                    <table>
                        <thead>
                            <tr>
                                <th>字段</th>
                                <th>类型</th>
                                <th>说明</th>
                            </tr>
                        </thead>
                        <tbody>
                            <tr>
                                <td>role</td>
                                <td>string</td>
                                <td>消息角色,可选值:system、user、assistant</td>
                            </tr>
                            <tr>
                                <td>content</td>
                                <td>string</td>
                                <td>消息内容</td>
                            </tr>
                        </tbody>
                    </table>
                </div>
            </div>
        </section>
        <section id="examples">
            <h2>使用示例</h2>
            <div class="tab">
                <button class="tablinks active" onclick="openTab(event, 'python-tab')">Python</button>
                <button class="tablinks" onclick="openTab(event, 'curl-tab')">cURL</button>
                <button class="tablinks" onclick="openTab(event, 'javascript-tab')">JavaScript</button>
            </div>
            <div id="python-tab" class="tabcontent" style="display: block;">
                <h3>Python示例</h3>
                <div class="example">
import openai
# 配置客户端
client = openai.OpenAI(
api_key="your-api-key",  # 对应 DEFAULT_KEY
base_url="http://localhost:7860/v1"
)
# 非流式请求
response = client.chat.completions.create(
model="GLM-4.5",
messages=[{"role": "user", "content": "你好,请介绍一下自己"}]
)
print(response.choices[0].message.content)
# 流式请求
response = client.chat.completions.create(
model="GLM-4.5",
messages=[{"role": "user", "content": "请写一首关于春天的诗"}],
stream=True
)
for chunk in response:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")</div>
            </div>
            <div id="curl-tab" class="tabcontent">
                <h3>cURL示例</h3>
                <div class="example">
# 非流式请求
curl -X POST http://localhost:7860/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer your-api-key" \
-d '{
"model": "GLM-4.5",
"messages": [{"role": "user", "content": "你好"}],
"stream": false
}'
# 流式请求
curl -X POST http://localhost:7860/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer your-api-key" \
-d '{
"model": "GLM-4.5",
"messages": [{"role": "user", "content": "你好"}],
"stream": true
}'
# 启用思考功能的请求
curl -X POST http://localhost:7860/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer your-api-key" \
-d '{
"model": "GLM-4.5",
"messages": [{"role": "user", "content": "请分析一下这个问题"}],
"enable_thinking": true
}'</div>
            </div>
            <div id="javascript-tab" class="tabcontent">
                <h3>JavaScript示例</h3>
                <div class="example">
const fetch = require('node-fetch');
async function chatWithGLM(message, stream = false) {
const response = await fetch('http://localhost:7860/v1/chat/completions', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer your-api-key'
},
body: JSON.stringify({
model: 'GLM-4.5',
messages: [{ role: 'user', content: message }],
stream: stream
})
});
if (stream) {
// 处理流式响应
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
const lines = chunk.split('\n');
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6);
if (data === '[DONE]') {
console.log('\n流式响应完成');
return;
}
try {
const parsed = JSON.parse(data);
const content = parsed.choices[0]?.delta?.content;
if (content) {
process.stdout.write(content);
}
} catch (e) {
// 忽略解析错误
}
}
}
}
} else {
// 处理非流式响应
const data = await response.json();
console.log(data.choices[0].message.content);
}
}
// 使用示例
chatWithGLM('你好,请介绍一下JavaScript', false);</div>
            </div>
        </section>
        <section id="error-handling">
            <h2>错误处理</h2>
            <p>API使用标准HTTP状态码来表示请求的成功或失败:</p>
            <table>
                <thead>
                    <tr>
                        <th>状态码</th>
                        <th>说明</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td>200 OK</td>
                        <td>请求成功</td>
                    </tr>
                    <tr>
                        <td>400 Bad Request</td>
                        <td>请求格式错误或参数无效</td>
                    </tr>
                    <tr>
                        <td>401 Unauthorized</td>
                        <td>API密钥无效或缺失</td>
                    </tr>
                    <tr>
                        <td>502 Bad Gateway</td>
                        <td>上游服务错误</td>
                    </tr>
                </tbody>
            </table>
            <div class="note">
                <strong>注意:</strong> 在调试模式下,服务器会输出详细的日志信息,可以通过设置环境变量 DEBUG_MODE=true 来启用。
            </div>
        </section>
    </div>
    <script>
        function openTab(evt, tabName) {
            var i, tabcontent, tablinks;
            tabcontent = document.getElementsByClassName("tabcontent");
            for (i = 0; i < tabcontent.length; i++) {
                tabcontent[i].style.display = "none";
            }
            tablinks = document.getElementsByClassName("tablinks");
            for (i = 0; i < tablinks.length; i++) {
                tablinks[i].className = tablinks[i].className.replace(" active", "");
            }
            document.getElementById(tabName).style.display = "block";
            evt.currentTarget.className += " active";
        }
    </script>
</body>
</html>`
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	fmt.Fprint(w, tmpl)
}
1386
+
1387
  func handleOptions(w http.ResponseWriter, r *http.Request) {
1388
  setCORSHeaders(w)
1389
  if r.Method == "OPTIONS" {
 
1400
  w.Header().Set("Access-Control-Allow-Credentials", "true")
1401
  }
1402
 
1403
+ // 修改 handleModels 函数,调用 getModels
1404
  func handleModels(w http.ResponseWriter, r *http.Request) {
1405
  setCORSHeaders(w)
1406
  if r.Method == "OPTIONS" {
 
1410
 
1411
  response := ModelsResponse{
1412
  Object: "list",
1413
+ Data: getModels(), // 调用 getModels 获取列表
 
 
 
 
 
 
 
1414
  }
1415
 
1416
  w.Header().Set("Content-Type", "application/json")
 
1418
  }
1419
 
1420
  func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
1421
+ startTime := time.Now()
1422
+ path := r.URL.Path
1423
+ clientIP := getClientIP(r)
1424
+ userAgent := r.UserAgent()
1425
  setCORSHeaders(w)
1426
  if r.Method == "OPTIONS" {
1427
  w.WriteHeader(http.StatusOK)
1428
  return
1429
  }
 
1430
  debugLog("收到chat completions请求")
1431
 
1432
+ // 验证API Key
1433
+ authHeader := r.Header.Get("Authorization")
1434
+ if !strings.HasPrefix(authHeader, "Bearer ") {
1435
+ debugLog("缺少或无效的Authorization头")
1436
+ http.Error(w, "Missing or invalid Authorization header", http.StatusUnauthorized)
1437
+ // 记录请求统计
1438
+ duration := time.Since(startTime)
1439
+ recordRequestStats(startTime, path, http.StatusUnauthorized)
1440
+ addLiveRequest(r.Method, path, http.StatusUnauthorized, duration, "", userAgent)
1441
+ return
1442
+ }
1443
+ //apiKey := strings.TrimPrefix(authHeader, "Bearer ")
1444
+ //if apiKey != DEFAULT_KEY {
1445
+ // debugLog("无效的API key: %s", apiKey)
1446
+ // http.Error(w, "Invalid API key", http.StatusUnauthorized)
1447
+ // // 记录请求统计
1448
+ // duration := time.Since(startTime)
1449
+ // recordRequestStats(startTime, path, http.StatusUnauthorized)
1450
+ // addLiveRequest(r.Method, path, http.StatusUnauthorized, duration, "", userAgent)
1451
+ // return
1452
+ //}
1453
+ //
1454
+ //debugLog("API key验证通过")
1455
  // 读取请求体
1456
  body, err := io.ReadAll(r.Body)
1457
  if err != nil {
1458
  debugLog("读取请求体失败: %v", err)
1459
  http.Error(w, "Failed to read request body", http.StatusBadRequest)
1460
+ // 记录请求统计
1461
+ duration := time.Since(startTime)
1462
+ recordRequestStats(startTime, path, http.StatusBadRequest)
1463
+ addLiveRequest(r.Method, path, http.StatusBadRequest, duration, "", userAgent)
1464
  return
1465
  }
 
1466
  // 解析请求
1467
  var req OpenAIRequest
1468
  if err := json.Unmarshal(body, &req); err != nil {
1469
  debugLog("JSON解析失败: %v", err)
1470
  http.Error(w, "Invalid JSON", http.StatusBadRequest)
1471
+ // 记录请求统计
1472
+ duration := time.Since(startTime)
1473
+ recordRequestStats(startTime, path, http.StatusBadRequest)
1474
+ addLiveRequest(r.Method, path, http.StatusBadRequest, duration, "", userAgent)
1475
  return
1476
  }
1477
 
1478
+ // --- 模型映射逻辑 ---
1479
+ models := getModels()
1480
+ modelExists := false
1481
+ for _, m := range models {
1482
+ if m.ID == req.Model {
1483
+ modelExists = true
1484
+ break
1485
+ }
1486
+ }
1487
+ actualUpstreamModelID := req.Model // 默认使用请求的模型ID
1488
+ if !modelExists {
1489
+ debugLog("未知模型 '%s',映射到默认上游模型 '%s'", req.Model, DEFAULT_UPSTREAM_MODEL_ID)
1490
+ actualUpstreamModelID = DEFAULT_UPSTREAM_MODEL_ID // 映射到默认模型
1491
+ }
1492
+ // --- 模型映射逻辑结束 ---
1493
+
1494
  // 如果客户端没有明确指定stream参数,使用默认值
1495
  if !bytes.Contains(body, []byte(`"stream"`)) {
1496
  req.Stream = DEFAULT_STREAM
1497
  debugLog("客户端未指定stream参数,使用默认值: %v", DEFAULT_STREAM)
1498
  }
1499
+ debugLog("请求解析成功 - 模型: %s (映射后: %s), 流式: %v, 消息数: %d", req.Model, actualUpstreamModelID, req.Stream, len(req.Messages))
 
1500
 
1501
  // 生成会话相关ID
1502
  chatID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), time.Now().Unix())
1503
  msgID := fmt.Sprintf("%d", time.Now().UnixNano())
1504
 
1505
+ // 决定是否启用思考功能:优先使用请求参数,其次使用环境变量
1506
+ enableThinking := ENABLE_THINKING // 默认使用环境变量值
1507
+ if req.EnableThinking != nil {
1508
+ enableThinking = *req.EnableThinking
1509
+ debugLog("使用请求参数中的思考功能设置: %v", enableThinking)
1510
+ } else {
1511
+ debugLog("使用环境变量中的思考功能设置: %v", enableThinking)
1512
+ }
1513
+
1514
+ // 构造上游请求 - 使用映射后的模型ID
1515
  upstreamReq := UpstreamRequest{
1516
  Stream: true, // 总是使用流式从上游获取
1517
  ChatID: chatID,
1518
  ID: msgID,
1519
+ Model: actualUpstreamModelID, // 使用映射后的ID
1520
  Messages: req.Messages,
1521
  Params: map[string]interface{}{},
1522
  Features: map[string]interface{}{
1523
+ "enable_thinking": enableThinking,
1524
  },
1525
  BackgroundTasks: map[string]bool{
1526
  "title_generation": false,
 
1531
  ID string `json:"id"`
1532
  Name string `json:"name"`
1533
  OwnedBy string `json:"owned_by"`
1534
+ }{ID: actualUpstreamModelID, Name: req.Model, OwnedBy: "openai"}, // ModelItem.ID也用映射后���,Name可以保留原始请求的ID或按需设置
1535
  ToolServers: []string{},
1536
  Variables: map[string]string{
1537
  "{{USER_NAME}}": "User",
 
1541
  }
1542
 
1543
  // 选择本次对话使用的token
1544
+ authToken := ZAI_TOKEN
1545
  if ANON_TOKEN_ENABLED {
1546
  if t, err := getAnonymousToken(); err == nil {
1547
  authToken = t
 
1556
  }
1557
  }
1558
 
1559
+ // 调用上游API,传入原始请求的模型ID用于响应
1560
  if req.Stream {
1561
+ handleStreamResponseWithIDs(w, upstreamReq, chatID, authToken, startTime, path, clientIP, userAgent, req.Model) // 传入原始模型ID
1562
  } else {
1563
+ handleNonStreamResponseWithIDs(w, upstreamReq, chatID, authToken, startTime, path, clientIP, userAgent, req.Model) // 传入原始模型ID
1564
  }
1565
  }
1566
 
 
1570
  debugLog("上游请求序列化失败: %v", err)
1571
  return nil, err
1572
  }
 
1573
  debugLog("调用上游API: %s", UPSTREAM_URL)
1574
  debugLog("上游请求体: %s", string(reqBody))
 
1575
  req, err := http.NewRequest("POST", UPSTREAM_URL, bytes.NewBuffer(reqBody))
1576
  if err != nil {
1577
  debugLog("创建HTTP请求失败: %v", err)
1578
  return nil, err
1579
  }
 
1580
  req.Header.Set("Content-Type", "application/json")
1581
  req.Header.Set("Accept", "application/json, text/event-stream")
1582
  req.Header.Set("User-Agent", BROWSER_UA)
 
1588
  req.Header.Set("X-FE-Version", X_FE_VERSION)
1589
  req.Header.Set("Origin", ORIGIN_BASE)
1590
  req.Header.Set("Referer", ORIGIN_BASE+"/c/"+refererChatID)
 
1591
  client := &http.Client{Timeout: 60 * time.Second}
1592
  resp, err := client.Do(req)
1593
  if err != nil {
1594
  debugLog("上游请求失败: %v", err)
1595
  return nil, err
1596
  }
 
1597
  debugLog("上游响应状态: %d %s", resp.StatusCode, resp.Status)
1598
  return resp, nil
1599
  }
1600
 
1601
+ // 修改 handleStreamResponseWithIDs 函数,增加原始模型ID参数
1602
+ func handleStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequest, chatID string, authToken string, startTime time.Time, path string, clientIP, userAgent string, originalModelID string) { // 增加 originalModelID 参数
1603
+ debugLog("开始处理流式响应 (chat_id=%s, original_model=%s)", chatID, originalModelID)
1604
  resp, err := callUpstreamWithHeaders(upstreamReq, chatID, authToken)
1605
  if err != nil {
1606
  debugLog("调用上游失败: %v", err)
1607
  http.Error(w, "Failed to call upstream", http.StatusBadGateway)
1608
+ // 记录请求统计
1609
+ duration := time.Since(startTime)
1610
+ recordRequestStats(startTime, path, http.StatusBadGateway)
1611
+ addLiveRequest("POST", path, http.StatusBadGateway, duration, "", userAgent)
1612
  return
1613
  }
1614
  defer resp.Body.Close()
 
1615
  if resp.StatusCode != http.StatusOK {
1616
  debugLog("上游返回错误状态: %d", resp.StatusCode)
1617
  // 读取错误响应体
 
1620
  debugLog("上游错误响应: %s", string(body))
1621
  }
1622
  http.Error(w, "Upstream error", http.StatusBadGateway)
1623
+ // 记录请求统计
1624
+ duration := time.Since(startTime)
1625
+ recordRequestStats(startTime, path, http.StatusBadGateway)
1626
+ addLiveRequest("POST", path, http.StatusBadGateway, duration, "", userAgent)
1627
  return
1628
  }
1629
 
 
1646
  }
1647
  // 处理每行前缀 "> "(包括起始位置)
1648
  s = strings.TrimPrefix(s, "> ")
1649
+ s = strings.ReplaceAll(s, "\n> ", "\n") // <--- 修正换行符
1650
  return strings.TrimSpace(s)
1651
  }
1652
 
 
1654
  w.Header().Set("Content-Type", "text/event-stream")
1655
  w.Header().Set("Cache-Control", "no-cache")
1656
  w.Header().Set("Connection", "keep-alive")
 
1657
  flusher, ok := w.(http.Flusher)
1658
  if !ok {
1659
  http.Error(w, "Streaming unsupported", http.StatusInternalServerError)
1660
  return
1661
  }
1662
 
1663
+ // 发送第一个chunk(role),使用原始模型ID
1664
  firstChunk := OpenAIResponse{
1665
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
1666
  Object: "chat.completion.chunk",
1667
  Created: time.Now().Unix(),
1668
+ Model: originalModelID, // 使用原始模型ID
1669
  Choices: []Choice{
1670
  {
1671
  Index: 0,
 
1680
  debugLog("开始读取上游SSE流")
1681
  scanner := bufio.NewScanner(resp.Body)
1682
  lineCount := 0
 
1683
  for scanner.Scan() {
1684
  line := scanner.Text()
1685
  lineCount++
 
1686
  if !strings.HasPrefix(line, "data: ") {
1687
  continue
1688
  }
 
1689
  dataStr := strings.TrimPrefix(line, "data: ")
1690
  if dataStr == "" {
1691
  continue
1692
  }
 
1693
  debugLog("收到SSE数据 (第%d行): %s", lineCount, dataStr)
 
1694
  var upstreamData UpstreamData
1695
  if err := json.Unmarshal([]byte(dataStr), &upstreamData); err != nil {
1696
  debugLog("SSE数据解析失败: %v", err)
 
1712
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
1713
  Object: "chat.completion.chunk",
1714
  Created: time.Now().Unix(),
1715
+ Model: originalModelID, // 使用原始模型ID
1716
  Choices: []Choice{{Index: 0, Delta: Delta{}, FinishReason: "stop"}},
1717
  }
1718
  writeSSEChunk(w, endChunk)
1719
+ fmt.Fprintf(w, "data: [DONE]\n")
1720
  flusher.Flush()
1721
  break
1722
  }
 
1736
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
1737
  Object: "chat.completion.chunk",
1738
  Created: time.Now().Unix(),
1739
+ Model: originalModelID, // 使用原始模型ID
1740
  Choices: []Choice{
1741
  {
1742
  Index: 0,
 
1757
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
1758
  Object: "chat.completion.chunk",
1759
  Created: time.Now().Unix(),
1760
+ Model: originalModelID, // 使用原始模型ID
1761
  Choices: []Choice{
1762
  {
1763
  Index: 0,
 
1768
  }
1769
  writeSSEChunk(w, endChunk)
1770
  flusher.Flush()
 
1771
  // 发送[DONE]
1772
+ fmt.Fprintf(w, "data: [DONE]\n")
1773
  flusher.Flush()
1774
  debugLog("流式响应完成,共处理%d行", lineCount)
1775
  break
1776
  }
1777
  }
 
1778
  if err := scanner.Err(); err != nil {
1779
  debugLog("扫描器错误: %v", err)
1780
  }
1781
+
1782
+ // 记录成功请求统计
1783
+ duration := time.Since(startTime)
1784
+ recordRequestStats(startTime, path, http.StatusOK)
1785
+ addLiveRequest("POST", path, http.StatusOK, duration, "", userAgent)
1786
  }
1787
 
1788
  func writeSSEChunk(w http.ResponseWriter, chunk OpenAIResponse) {
1789
  data, _ := json.Marshal(chunk)
1790
+ fmt.Fprintf(w, "data: %s\n", data)
1791
  }
1792
 
1793
+ // 修改 handleNonStreamResponseWithIDs 函数,增加原始模型ID参数
1794
+ func handleNonStreamResponseWithIDs(w http.ResponseWriter, upstreamReq UpstreamRequest, chatID string, authToken string, startTime time.Time, path string, clientIP, userAgent string, originalModelID string) { // 增加 originalModelID 参数
1795
+ debugLog("开始处理非流式响应 (chat_id=%s, original_model=%s)", chatID, originalModelID)
1796
  resp, err := callUpstreamWithHeaders(upstreamReq, chatID, authToken)
1797
  if err != nil {
1798
  debugLog("调用上游失败: %v", err)
1799
  http.Error(w, "Failed to call upstream", http.StatusBadGateway)
1800
+ // 记录请求统计
1801
+ duration := time.Since(startTime)
1802
+ recordRequestStats(startTime, path, http.StatusBadGateway)
1803
+ addLiveRequest("POST", path, http.StatusBadGateway, duration, "", userAgent)
1804
  return
1805
  }
1806
  defer resp.Body.Close()
 
1807
  if resp.StatusCode != http.StatusOK {
1808
  debugLog("上游返回错误状态: %d", resp.StatusCode)
1809
  // 读取错误响应体
 
1812
  debugLog("上游错误响应: %s", string(body))
1813
  }
1814
  http.Error(w, "Upstream error", http.StatusBadGateway)
1815
+ // 记录请求统计
1816
+ duration := time.Since(startTime)
1817
+ recordRequestStats(startTime, path, http.StatusBadGateway)
1818
+ addLiveRequest("POST", path, http.StatusBadGateway, duration, "", userAgent)
1819
  return
1820
  }
1821
 
 
1823
  var fullContent strings.Builder
1824
  scanner := bufio.NewScanner(resp.Body)
1825
  debugLog("开始收集完整响应内容")
 
1826
  for scanner.Scan() {
1827
  line := scanner.Text()
1828
  if !strings.HasPrefix(line, "data: ") {
1829
  continue
1830
  }
 
1831
  dataStr := strings.TrimPrefix(line, "data: ")
1832
  if dataStr == "" {
1833
  continue
1834
  }
 
1835
  var upstreamData UpstreamData
1836
  if err := json.Unmarshal([]byte(dataStr), &upstreamData); err != nil {
1837
  continue
1838
  }
 
1839
  if upstreamData.Data.DeltaContent != "" {
1840
  out := upstreamData.Data.DeltaContent
1841
  if upstreamData.Data.Phase == "thinking" {
 
1855
  s = strings.ReplaceAll(s, "</details>", "")
1856
  }
1857
  s = strings.TrimPrefix(s, "> ")
1858
+ s = strings.ReplaceAll(s, "\n> ", "\n") // <--- 修正换行符
1859
  return strings.TrimSpace(s)
1860
  }(out)
1861
  }
 
1863
  fullContent.WriteString(out)
1864
  }
1865
  }
 
1866
  if upstreamData.Data.Done || upstreamData.Data.Phase == "done" {
1867
  debugLog("检测到完成信号,停止收集")
1868
  break
1869
  }
1870
  }
 
1871
  finalContent := fullContent.String()
1872
  debugLog("内容收集完成,最终长度: %d", len(finalContent))
1873
 
1874
+ // 构造完整响应,使用原始模型ID
1875
  response := OpenAIResponse{
1876
  ID: fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
1877
  Object: "chat.completion",
1878
  Created: time.Now().Unix(),
1879
+ Model: originalModelID, // 使用原始模型ID
1880
  Choices: []Choice{
1881
  {
1882
  Index: 0,
 
1893
  TotalTokens: 0,
1894
  },
1895
  }
 
1896
  w.Header().Set("Content-Type", "application/json")
1897
  json.NewEncoder(w).Encode(response)
1898
  debugLog("非流式响应发送完成")
1899
+
1900
+ // 记录成功请求统计
1901
+ duration := time.Since(startTime)
1902
+ recordRequestStats(startTime, path, http.StatusOK)
1903
+ addLiveRequest("POST", path, http.StatusOK, duration, "", userAgent)
1904
  }