nanoppa commited on
Commit
c1fbdad
·
verified ·
1 Parent(s): 2dfe2b1

Upload 26 files

Browse files
Dockerfile CHANGED
@@ -32,9 +32,6 @@ COPY . .
32
  RUN mkdir -p /app/logs /app/data/temp && \
33
  echo '{"ssoNormal": {}, "ssoSuper": {}}' > /app/data/token.json
34
 
35
- RUN chmod -R 777 /app
36
 
37
-
38
- EXPOSE 7860
39
-
40
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
32
  RUN mkdir -p /app/logs /app/data/temp && \
33
  echo '{"ssoNormal": {}, "ssoSuper": {}}' > /app/data/token.json
34
 
35
+ EXPOSE 8000
36
 
37
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
 
 
 
app/api/admin/manage.py CHANGED
@@ -24,6 +24,8 @@ router = APIRouter(tags=["管理"])
24
  # 常量定义
25
  STATIC_DIR = Path(__file__).parents[2] / "template"
26
  TEMP_DIR = Path(__file__).parents[3] / "data" / "temp"
 
 
27
  SESSION_EXPIRE_HOURS = 24
28
  BYTES_PER_KB = 1024
29
  BYTES_PER_MB = 1024 * 1024
@@ -250,7 +252,7 @@ async def admin_login(request: LoginRequest) -> LoginResponse:
250
 
251
 
252
  @router.post("/api/logout")
253
- async def admin_logout(authenticated: bool = Depends(verify_admin_session),
254
  authorization: Optional[str] = Header(None)) -> Dict[str, Any]:
255
  """
256
  管理员登出
@@ -277,7 +279,7 @@ async def admin_logout(authenticated: bool = Depends(verify_admin_session),
277
 
278
 
279
  @router.get("/api/tokens", response_model=TokenListResponse)
280
- async def list_tokens(authenticated: bool = Depends(verify_admin_session)) -> TokenListResponse:
281
  """
282
  获取所有Token列表
283
 
@@ -335,7 +337,7 @@ async def list_tokens(authenticated: bool = Depends(verify_admin_session)) -> To
335
 
336
  @router.post("/api/tokens/add")
337
  async def add_tokens(request: AddTokensRequest,
338
- authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
339
  """
340
  批量添加Token
341
 
@@ -370,7 +372,7 @@ async def add_tokens(request: AddTokensRequest,
370
 
371
  @router.post("/api/tokens/delete")
372
  async def delete_tokens(request: DeleteTokensRequest,
373
- authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
374
  """
375
  批量删除Token
376
 
@@ -404,7 +406,7 @@ async def delete_tokens(request: DeleteTokensRequest,
404
 
405
 
406
  @router.get("/api/settings")
407
- async def get_settings(authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
408
  """获取全局配置"""
409
  try:
410
  logger.debug("[Admin] 获取全局配置")
@@ -426,8 +428,15 @@ class UpdateSettingsRequest(BaseModel):
426
  grok_config: Optional[Dict[str, Any]] = None
427
 
428
 
 
 
 
 
 
 
 
429
  @router.post("/api/settings")
430
- async def update_settings(request: UpdateSettingsRequest, authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
431
  """更新全局配置"""
432
  try:
433
  import toml
@@ -482,21 +491,37 @@ def _format_size(size_bytes: int) -> str:
482
 
483
 
484
  @router.get("/api/cache/size")
485
- async def get_cache_size(authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
486
  """获取缓存大小"""
487
  try:
488
  logger.debug("[Admin] 开始获取缓存大小")
489
 
490
- if not TEMP_DIR.exists():
491
- logger.warning(f"[Admin] 缓存目录不存在: {TEMP_DIR}")
492
- return {"success": True, "data": {"size": "0 MB"}}
493
-
494
- # 计算目录大小
495
- total_size = _calculate_dir_size(TEMP_DIR)
496
- size_str = _format_size(total_size)
497
-
498
- logger.debug(f"[Admin] 缓存大小获取完成 - 大小: {size_str}")
499
- return {"success": True, "data": {"size": size_str}}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
500
 
501
  except Exception as e:
502
  logger.error(f"[Admin] 获取缓存大小异常 - 错误: {str(e)}")
@@ -507,35 +532,50 @@ async def get_cache_size(authenticated: bool = Depends(verify_admin_session)) ->
507
 
508
 
509
  @router.post("/api/cache/clear")
510
- async def clear_cache(authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
511
- """清理缓存 - 删除所有临���文件"""
 
 
512
  try:
513
  logger.debug("[Admin] 开始清理缓存")
514
 
515
- if not TEMP_DIR.exists():
516
- logger.warning(f"[Admin] 缓存目录不存在: {TEMP_DIR}")
517
- return {
518
- "success": True,
519
- "message": "缓存目录不存在,无需清理",
520
- "data": {"deleted_count": 0}
521
- }
522
-
523
- # 删除所有文件
524
  deleted_count = 0
525
- for file_path in TEMP_DIR.iterdir():
526
- if file_path.is_file():
527
- try:
528
- file_path.unlink()
529
- deleted_count += 1
530
- logger.debug(f"[Admin] 删除缓存文件: {file_path.name}")
531
- except Exception as e:
532
- logger.error(f"[Admin] 删除缓存文件失败: {file_path.name}, 错误: {str(e)}")
533
-
534
- logger.debug(f"[Admin] 缓存清理完成 - 删除文件数量: {deleted_count}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
535
  return {
536
  "success": True,
537
- "message": f"成功清理缓存,删除 {deleted_count} 个文件",
538
- "data": {"deleted_count": deleted_count}
 
 
 
 
539
  }
540
 
541
  except Exception as e:
@@ -546,8 +586,88 @@ async def clear_cache(authenticated: bool = Depends(verify_admin_session)) -> Di
546
  )
547
 
548
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
549
  @router.get("/api/stats")
550
- async def get_stats(authenticated: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
551
  """
552
  获取统计信息
553
 
 
24
  # 常量定义
25
  STATIC_DIR = Path(__file__).parents[2] / "template"
26
  TEMP_DIR = Path(__file__).parents[3] / "data" / "temp"
27
+ IMAGE_CACHE_DIR = TEMP_DIR / "image"
28
+ VIDEO_CACHE_DIR = TEMP_DIR / "video"
29
  SESSION_EXPIRE_HOURS = 24
30
  BYTES_PER_KB = 1024
31
  BYTES_PER_MB = 1024 * 1024
 
252
 
253
 
254
  @router.post("/api/logout")
255
+ async def admin_logout(_: bool = Depends(verify_admin_session),
256
  authorization: Optional[str] = Header(None)) -> Dict[str, Any]:
257
  """
258
  管理员登出
 
279
 
280
 
281
  @router.get("/api/tokens", response_model=TokenListResponse)
282
+ async def list_tokens(_: bool = Depends(verify_admin_session)) -> TokenListResponse:
283
  """
284
  获取所有Token列表
285
 
 
337
 
338
  @router.post("/api/tokens/add")
339
  async def add_tokens(request: AddTokensRequest,
340
+ _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
341
  """
342
  批量添加Token
343
 
 
372
 
373
  @router.post("/api/tokens/delete")
374
  async def delete_tokens(request: DeleteTokensRequest,
375
+ _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
376
  """
377
  批量删除Token
378
 
 
406
 
407
 
408
  @router.get("/api/settings")
409
+ async def get_settings(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
410
  """获取全局配置"""
411
  try:
412
  logger.debug("[Admin] 获取全局配置")
 
428
  grok_config: Optional[Dict[str, Any]] = None
429
 
430
 
431
+ class StreamTimeoutSettings(BaseModel):
432
+ """流式超时配置"""
433
+ stream_chunk_timeout: int = 120
434
+ stream_first_response_timeout: int = 30
435
+ stream_total_timeout: int = 600
436
+
437
+
438
  @router.post("/api/settings")
439
+ async def update_settings(request: UpdateSettingsRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
440
  """更新全局配置"""
441
  try:
442
  import toml
 
491
 
492
 
493
  @router.get("/api/cache/size")
494
+ async def get_cache_size(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
495
  """获取缓存大小"""
496
  try:
497
  logger.debug("[Admin] 开始获取缓存大小")
498
 
499
+ # 计算图片缓存大小
500
+ image_size = 0
501
+ if IMAGE_CACHE_DIR.exists():
502
+ image_size = _calculate_dir_size(IMAGE_CACHE_DIR)
503
+
504
+ # 计算视频缓存大小
505
+ video_size = 0
506
+ if VIDEO_CACHE_DIR.exists():
507
+ video_size = _calculate_dir_size(VIDEO_CACHE_DIR)
508
+
509
+ # 总大小
510
+ total_size = image_size + video_size
511
+
512
+ logger.debug(f"[Admin] 缓存大小获取完成 - 图片: {_format_size(image_size)}, 视频: {_format_size(video_size)}, 总计: {_format_size(total_size)}")
513
+
514
+ return {
515
+ "success": True,
516
+ "data": {
517
+ "image_size": _format_size(image_size),
518
+ "video_size": _format_size(video_size),
519
+ "total_size": _format_size(total_size),
520
+ "image_size_bytes": image_size,
521
+ "video_size_bytes": video_size,
522
+ "total_size_bytes": total_size
523
+ }
524
+ }
525
 
526
  except Exception as e:
527
  logger.error(f"[Admin] 获取缓存大小异常 - 错误: {str(e)}")
 
532
 
533
 
534
  @router.post("/api/cache/clear")
535
+ async def clear_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
536
+ """清理缓存
537
+
538
+ 删除所有临时文件"""
539
  try:
540
  logger.debug("[Admin] 开始清理缓存")
541
 
 
 
 
 
 
 
 
 
 
542
  deleted_count = 0
543
+ image_count = 0
544
+ video_count = 0
545
+
546
+ # 清理图片缓存
547
+ if IMAGE_CACHE_DIR.exists():
548
+ for file_path in IMAGE_CACHE_DIR.iterdir():
549
+ if file_path.is_file():
550
+ try:
551
+ file_path.unlink()
552
+ image_count += 1
553
+ logger.debug(f"[Admin] 删除图片缓存: {file_path.name}")
554
+ except Exception as e:
555
+ logger.error(f"[Admin] 删除图片缓存失败: {file_path.name}, 错误: {str(e)}")
556
+
557
+ # 清理视频缓存
558
+ if VIDEO_CACHE_DIR.exists():
559
+ for file_path in VIDEO_CACHE_DIR.iterdir():
560
+ if file_path.is_file():
561
+ try:
562
+ file_path.unlink()
563
+ video_count += 1
564
+ logger.debug(f"[Admin] 删除视频缓存: {file_path.name}")
565
+ except Exception as e:
566
+ logger.error(f"[Admin] 删除视频缓存失败: {file_path.name}, 错误: {str(e)}")
567
+
568
+ deleted_count = image_count + video_count
569
+ logger.debug(f"[Admin] 缓存清理完成 - 图片: {image_count}, 视频: {video_count}, 总计: {deleted_count}")
570
+
571
  return {
572
  "success": True,
573
+ "message": f"成功清理缓存,删除图片 {image_count} 个,视频 {video_count} 个,共 {deleted_count} 个文件",
574
+ "data": {
575
+ "deleted_count": deleted_count,
576
+ "image_count": image_count,
577
+ "video_count": video_count
578
+ }
579
  }
580
 
581
  except Exception as e:
 
586
  )
587
 
588
 
589
+ @router.post("/api/cache/clear/images")
590
+ async def clear_image_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
591
+ """清理图片缓存
592
+
593
+ 仅删除图片缓存文件"""
594
+ try:
595
+ logger.debug("[Admin] 开始清理图片缓存")
596
+
597
+ deleted_count = 0
598
+
599
+ # 清理图片缓存
600
+ if IMAGE_CACHE_DIR.exists():
601
+ for file_path in IMAGE_CACHE_DIR.iterdir():
602
+ if file_path.is_file():
603
+ try:
604
+ file_path.unlink()
605
+ deleted_count += 1
606
+ logger.debug(f"[Admin] 删除图片缓存: {file_path.name}")
607
+ except Exception as e:
608
+ logger.error(f"[Admin] 删除图片缓存失败: {file_path.name}, 错误: {str(e)}")
609
+
610
+ logger.debug(f"[Admin] 图片缓存清理完成 - 删除 {deleted_count} 个文件")
611
+
612
+ return {
613
+ "success": True,
614
+ "message": f"成功清理图片缓存,删除 {deleted_count} 个文件",
615
+ "data": {
616
+ "deleted_count": deleted_count,
617
+ "type": "images"
618
+ }
619
+ }
620
+
621
+ except Exception as e:
622
+ logger.error(f"[Admin] 清理图片缓存异常 - 错误: {str(e)}")
623
+ raise HTTPException(
624
+ status_code=500,
625
+ detail={"error": f"清理图片缓存失败: {str(e)}", "code": "IMAGE_CACHE_CLEAR_ERROR"}
626
+ )
627
+
628
+
629
+ @router.post("/api/cache/clear/videos")
630
+ async def clear_video_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
631
+ """清理视频缓存
632
+
633
+ 仅删除视频缓存文件"""
634
+ try:
635
+ logger.debug("[Admin] 开始清理视频缓存")
636
+
637
+ deleted_count = 0
638
+
639
+ # 清理视频缓存
640
+ if VIDEO_CACHE_DIR.exists():
641
+ for file_path in VIDEO_CACHE_DIR.iterdir():
642
+ if file_path.is_file():
643
+ try:
644
+ file_path.unlink()
645
+ deleted_count += 1
646
+ logger.debug(f"[Admin] 删除视频缓存: {file_path.name}")
647
+ except Exception as e:
648
+ logger.error(f"[Admin] 删除视频缓存失败: {file_path.name}, 错误: {str(e)}")
649
+
650
+ logger.debug(f"[Admin] 视频缓存清理完成 - 删除 {deleted_count} 个文件")
651
+
652
+ return {
653
+ "success": True,
654
+ "message": f"成功清理视频缓存,删除 {deleted_count} 个文件",
655
+ "data": {
656
+ "deleted_count": deleted_count,
657
+ "type": "videos"
658
+ }
659
+ }
660
+
661
+ except Exception as e:
662
+ logger.error(f"[Admin] 清理视频缓存异常 - 错误: {str(e)}")
663
+ raise HTTPException(
664
+ status_code=500,
665
+ detail={"error": f"清理视频缓存失败: {str(e)}", "code": "VIDEO_CACHE_CLEAR_ERROR"}
666
+ )
667
+
668
+
669
  @router.get("/api/stats")
670
+ async def get_stats(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
671
  """
672
  获取统计信息
673
 
app/api/v1/chat.py CHANGED
@@ -22,21 +22,21 @@ router = APIRouter(prefix="/chat", tags=["聊天"])
22
  @router.post("/completions", response_model=None)
23
  async def chat_completions(
24
  request: OpenAIChatRequest,
25
- authenticated: Optional[str] = Depends(auth_manager.verify)
26
  ):
27
  """
28
  创建聊天补全
29
-
30
  兼容OpenAI聊天API的端点,支持流式和非流式响应。
31
-
32
  Args:
33
  request: OpenAI格式的聊天请求
34
- authenticated: 认证状态(由依赖注入
35
-
36
  Returns:
37
  OpenAIChatCompletionResponse: 非流式响应
38
  StreamingResponse: 流式响应
39
-
40
  Raises:
41
  HTTPException: 当请求处理失败时
42
  """
@@ -62,7 +62,7 @@ async def chat_completions(
62
  return result
63
 
64
  except GrokApiException as e:
65
- logger.error(f"[Chat] Grok API错误: {str(e)}", extra={"details": e.details})
66
  raise HTTPException(
67
  status_code=500,
68
  detail={
@@ -74,7 +74,7 @@ async def chat_completions(
74
  }
75
  )
76
  except Exception as e:
77
- logger.error(f"[Chat] 聊天请求处理失败: {str(e)}", exc_info=True)
78
  raise HTTPException(
79
  status_code=500,
80
  detail={
 
22
  @router.post("/completions", response_model=None)
23
  async def chat_completions(
24
  request: OpenAIChatRequest,
25
+ _: Optional[str] = Depends(auth_manager.verify)
26
  ):
27
  """
28
  创建聊天补全
29
+
30
  兼容OpenAI聊天API的端点,支持流式和非流式响应。
31
+
32
  Args:
33
  request: OpenAI格式的聊天请求
34
+ _: 认证依赖(自动验证
35
+
36
  Returns:
37
  OpenAIChatCompletionResponse: 非流式响应
38
  StreamingResponse: 流式响应
39
+
40
  Raises:
41
  HTTPException: 当请求处理失败时
42
  """
 
62
  return result
63
 
64
  except GrokApiException as e:
65
+ logger.error(f"[Chat] Grok API错误: {str(e)} - 详情: {e.details}")
66
  raise HTTPException(
67
  status_code=500,
68
  detail={
 
74
  }
75
  )
76
  except Exception as e:
77
+ logger.error(f"[Chat] 聊天请求处理失败: {str(e)}")
78
  raise HTTPException(
79
  status_code=500,
80
  detail={
app/api/v1/images.py CHANGED
@@ -4,7 +4,7 @@ from fastapi import APIRouter, HTTPException
4
  from fastapi.responses import FileResponse
5
 
6
  from app.core.logger import logger
7
- from app.services.grok.image_cache import image_cache_service
8
 
9
 
10
  router = APIRouter()
@@ -12,38 +12,47 @@ router = APIRouter()
12
 
13
  @router.get("/images/{img_path:path}")
14
  async def get_image(img_path: str):
15
- """获取缓存的图片
16
 
17
  Args:
18
- img_path: 图片路径,格式如 users-xxx-generated-xxx-image.jpg
19
 
20
  Returns:
21
- 图片文件响应
22
  """
23
  try:
24
  # 将路径转换回原始格式(短横线转斜杠)
25
  original_path = "/" + img_path.replace('-', '/')
26
 
27
- # 检查缓存否存在
28
- cache_path = image_cache_service.get_cached_image(original_path)
 
 
 
 
 
 
 
 
 
29
 
30
  if cache_path and cache_path.exists():
31
- logger.debug(f"[ImageAPI] 返回缓存图片: {cache_path}")
32
  return FileResponse(
33
  path=str(cache_path),
34
- media_type="image/jpeg",
35
  headers={
36
  "Cache-Control": "public, max-age=86400",
37
  "Access-Control-Allow-Origin": "*"
38
  }
39
  )
40
 
41
- # 图片不存在
42
- logger.warning(f"[ImageAPI] 图片未找到: {original_path}")
43
- raise HTTPException(status_code=404, detail="Image not found")
44
 
45
  except HTTPException:
46
  raise
47
  except Exception as e:
48
- logger.error(f"[ImageAPI] 获取图片失败: {e}")
49
  raise HTTPException(status_code=500, detail=str(e))
 
4
  from fastapi.responses import FileResponse
5
 
6
  from app.core.logger import logger
7
+ from app.services.grok.cache import image_cache_service, video_cache_service
8
 
9
 
10
  router = APIRouter()
 
12
 
13
@router.get("/images/{img_path:path}")
async def get_image(img_path: str):
    """Serve a cached image or video file.

    Args:
        img_path: Flattened file path, e.g. users-xxx-generated-xxx-image.jpg
            or users-xxx-generated-xxx-video.mp4 (dashes stand for slashes).

    Returns:
        FileResponse for the cached file with long-lived cache headers.

    Raises:
        HTTPException: 404 when the file is not cached, 500 on unexpected errors.
    """
    try:
        # Restore the original asset path (dashes were slashes).
        # NOTE(review): assumes original path segments never contain '-';
        # a dash inside a real filename would be mangled — confirm the
        # encoding used by the caller that produced img_path.
        original_path = "/" + img_path.replace('-', '/')

        # Choose cache backend and media type from the file extension.
        # Fix: previously every video extension was served as video/mp4;
        # use the correct MIME type per extension.
        video_media_types = {
            '.mp4': 'video/mp4',
            '.webm': 'video/webm',
            '.mov': 'video/quicktime',
            '.avi': 'video/x-msvideo',
        }
        lowered = original_path.lower()
        media_type = None
        for ext, mime in video_media_types.items():
            if lowered.endswith(ext):
                media_type = mime
                break

        if media_type is not None:
            # Video request: look up the video cache.
            cache_path = video_cache_service.get_cached_video(original_path)
        else:
            # Anything else is treated as an image.
            cache_path = image_cache_service.get_cached_image(original_path)
            media_type = "image/jpeg"

        if cache_path and cache_path.exists():
            logger.debug(f"[MediaAPI] 返回缓存文件: {cache_path}")
            return FileResponse(
                path=str(cache_path),
                media_type=media_type,
                headers={
                    "Cache-Control": "public, max-age=86400",
                    "Access-Control-Allow-Origin": "*"
                }
            )

        # File is not cached.
        logger.warning(f"[MediaAPI] 文件未找到: {original_path}")
        raise HTTPException(status_code=404, detail="File not found")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[MediaAPI] 获取文件失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@@ -19,15 +19,15 @@ router = APIRouter(tags=["模型"])
19
 
20
 
21
  @router.get("/models")
22
- async def list_models(authenticated: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
23
  """
24
  获取可用模型列表
25
-
26
  返回 OpenAI 兼容的模型列表格式,包含系统支持的所有 Grok 模型的详细信息。
27
-
28
  Args:
29
- authenticated: 认证状态(由依赖注入
30
-
31
  Returns:
32
  Dict[str, Any]: 包含模型列表的响应数据
33
  """
@@ -85,14 +85,14 @@ async def list_models(authenticated: Optional[str] = Depends(auth_manager.verify
85
 
86
 
87
  @router.get("/models/{model_id}")
88
- async def get_model(model_id: str, authenticated: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
89
  """
90
  获取特定模型信息
91
-
92
  Args:
93
  model_id (str): 模型ID
94
- authenticated: 认证状态(由依赖注入
95
-
96
  Returns:
97
  Dict[str, Any]: 模型详细信息
98
  """
 
19
 
20
 
21
  @router.get("/models")
22
+ async def list_models(_: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
23
  """
24
  获取可用模型列表
25
+
26
  返回 OpenAI 兼容的模型列表格式,包含系统支持的所有 Grok 模型的详细信息。
27
+
28
  Args:
29
+ _: 认证依赖(自动验证
30
+
31
  Returns:
32
  Dict[str, Any]: 包含模型列表的响应数据
33
  """
 
85
 
86
 
87
  @router.get("/models/{model_id}")
88
+ async def get_model(model_id: str, _: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
89
  """
90
  获取特定模型信息
91
+
92
  Args:
93
  model_id (str): 模型ID
94
+ _: 认证依赖(自动验证
95
+
96
  Returns:
97
  Dict[str, Any]: 模型详细信息
98
  """
app/core/exception.py CHANGED
@@ -101,7 +101,7 @@ async def grok_api_exception_handler(_: Request, exc: GrokApiException) -> JSONR
101
  )
102
 
103
 
104
- async def global_exception_handler(_: Request) -> JSONResponse:
105
  """处理未捕获异常"""
106
  return JSONResponse(
107
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
 
101
  )
102
 
103
 
104
+ async def global_exception_handler(_: Request, exc: Exception) -> JSONResponse:
105
  """处理未捕获异常"""
106
  return JSONResponse(
107
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
app/models/grok_models.py CHANGED
@@ -66,6 +66,20 @@ _MODEL_CONFIG = {
66
  "default_max_output_tokens": 65536,
67
  "supported_max_output_tokens": 131072,
68
  "default_top_p": 0.95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  }
70
  }
71
 
@@ -82,6 +96,7 @@ class Models(Enum):
82
  GROK_4_FAST_EXPERT = "grok-4-fast-expert"
83
  GROK_4_EXPERT = "grok-4-expert"
84
  GROK_4_HEAVY = "grok-4-heavy"
 
85
 
86
  @classmethod
87
  def get_model_info(cls, model: str) -> dict:
 
66
  "default_max_output_tokens": 65536,
67
  "supported_max_output_tokens": 131072,
68
  "default_top_p": 0.95
69
+ },
70
+ "grok-imagine-0.9": {
71
+ "grok_model": ("grok-3", "MODEL_MODE_FAST"),
72
+ "rate_limit_model": "grok-3",
73
+ "cost": {"type": "low_cost", "multiplier": 1, "description": "计1次调用"},
74
+ "requires_super": False,
75
+ "display_name": "Grok Imagine 0.9",
76
+ "description": "Video generation model powered by Grok",
77
+ "raw_model_path": "xai/grok-imagine-0.9",
78
+ "default_temperature": 1.0,
79
+ "default_max_output_tokens": 8192,
80
+ "supported_max_output_tokens": 131072,
81
+ "default_top_p": 0.95,
82
+ "is_video_model": True
83
  }
84
  }
85
 
 
96
  GROK_4_FAST_EXPERT = "grok-4-fast-expert"
97
  GROK_4_EXPERT = "grok-4-expert"
98
  GROK_4_HEAVY = "grok-4-heavy"
99
+ GROK_IMAGINE_0_9 = "grok-imagine-0.9"
100
 
101
  @classmethod
102
  def get_model_info(cls, model: str) -> dict:
app/models/openai_schema.py CHANGED
@@ -15,8 +15,8 @@ class OpenAIChatRequest(BaseModel):
15
  max_tokens: Optional[int] = Field(None, ge=1, le=100000, description="最大Token数")
16
  top_p: Optional[float] = Field(1.0, ge=0, le=1, description="采样参数")
17
 
18
- @field_validator('messages')
19
  @classmethod
 
20
  def validate_messages(cls, v):
21
  """验证消息格式"""
22
  if not v:
@@ -49,13 +49,13 @@ class OpenAIChatRequest(BaseModel):
49
 
50
  return v
51
 
52
- @field_validator('model')
53
  @classmethod
 
54
  def validate_model(cls, v):
55
  """验证模型名称"""
56
  allowed_models = [
57
  'grok-3-fast', 'grok-4-fast', 'grok-4-fast-expert',
58
- 'grok-4-expert', 'grok-4-heavy'
59
  ]
60
  if v not in allowed_models:
61
  raise HTTPException(
@@ -68,16 +68,16 @@ class OpenAIChatCompletionMessage(BaseModel):
68
  """聊天完成消息"""
69
  role: str = Field(..., description="角色")
70
  content: str = Field(..., description="消息内容")
71
- reference_id: Optional[str] = Field(None, description="参考ID")
72
- annotations: Optional[List[str]] = Field(None, description="注释")
73
 
74
 
75
  class OpenAIChatCompletionChoice(BaseModel):
76
  """聊天完成选项"""
77
  index: int = Field(..., description="选项索引")
78
  message: OpenAIChatCompletionMessage = Field(..., description="响应消息")
79
- logprobs: Optional[float] = Field(None, description="对数概率")
80
- finish_reason: str = Field("stop", description="完成原因")
81
 
82
 
83
  class OpenAIChatCompletionResponse(BaseModel):
@@ -108,8 +108,8 @@ class OpenAIChatCompletionChunkChoice(BaseModel):
108
  class OpenAIChatCompletionChunkResponse(BaseModel):
109
  """流式聊天完成响应"""
110
  id: str = Field(..., description="响应ID")
111
- object: str = Field("chat.completion.chunk", description="对象类型")
112
  created: int = Field(..., description="创建时间戳")
113
  model: str = Field(..., description="使用的模型")
114
- system_fingerprint: Optional[str] = Field(None, description="系统指纹")
115
  choices: List[OpenAIChatCompletionChunkChoice] = Field(..., description="响应选项")
 
15
  max_tokens: Optional[int] = Field(None, ge=1, le=100000, description="最大Token数")
16
  top_p: Optional[float] = Field(1.0, ge=0, le=1, description="采样参数")
17
 
 
18
  @classmethod
19
+ @field_validator('messages')
20
  def validate_messages(cls, v):
21
  """验证消息格式"""
22
  if not v:
 
49
 
50
  return v
51
 
 
52
  @classmethod
53
+ @field_validator('model')
54
  def validate_model(cls, v):
55
  """验证模型名称"""
56
  allowed_models = [
57
  'grok-3-fast', 'grok-4-fast', 'grok-4-fast-expert',
58
+ 'grok-4-expert', 'grok-4-heavy', 'grok-imagine-0.9'
59
  ]
60
  if v not in allowed_models:
61
  raise HTTPException(
 
68
  """聊天完成消息"""
69
  role: str = Field(..., description="角色")
70
  content: str = Field(..., description="消息内容")
71
+ reference_id: Optional[str] = Field(default=None, description="参考ID")
72
+ annotations: Optional[List[str]] = Field(default=None, description="注释")
73
 
74
 
75
  class OpenAIChatCompletionChoice(BaseModel):
76
  """聊天完成选项"""
77
  index: int = Field(..., description="选项索引")
78
  message: OpenAIChatCompletionMessage = Field(..., description="响应消息")
79
+ logprobs: Optional[float] = Field(default=None, description="对数概率")
80
+ finish_reason: str = Field(default="stop", description="完成原因")
81
 
82
 
83
  class OpenAIChatCompletionResponse(BaseModel):
 
108
  class OpenAIChatCompletionChunkResponse(BaseModel):
109
  """流式聊天完成响应"""
110
  id: str = Field(..., description="响应ID")
111
+ object: str = Field(default="chat.completion.chunk", description="对象类型")
112
  created: int = Field(..., description="创建时间戳")
113
  model: str = Field(..., description="使用的模型")
114
+ system_fingerprint: Optional[str] = Field(default=None, description="系统指纹")
115
  choices: List[OpenAIChatCompletionChunkChoice] = Field(..., description="响应选项")
app/services/grok/cache.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """缓存服务模块"""
2
+
3
+ import asyncio
4
+ from pathlib import Path
5
+ from typing import Optional
6
+ from curl_cffi.requests import AsyncSession
7
+
8
+ from app.core.config import setting
9
+ from app.core.logger import logger
10
+ from app.services.grok.statsig import get_dynamic_headers
11
+
12
+
13
class CacheService:
    """Base class for on-disk caching of files downloaded from assets.grok.com.

    Each subclass owns one subdirectory under ``data/temp/`` (e.g. ``image``
    or ``video``). Remote asset paths such as ``/users/x/generated/y/f.jpg``
    are flattened into single filenames by replacing ``/`` with ``-``.
    """

    def __init__(self, cache_type: str):
        """Create the cache directory for *cache_type* if it does not exist.

        Args:
            cache_type: Subdirectory name and log tag, e.g. "image" or "video".
        """
        self.cache_type = cache_type
        self.cache_dir = Path(f"data/temp/{cache_type}")
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        # Strong references to fire-and-forget cleanup tasks: asyncio only
        # keeps a weak reference to tasks, so an unreferenced task created by
        # create_task() may be garbage-collected before it finishes.
        self._cleanup_tasks: set = set()

    @staticmethod
    def _get_cache_filename(file_path: str) -> str:
        """Flatten an asset path into a cache filename ('/a/b.jpg' -> 'a-b.jpg')."""
        return file_path.lstrip('/').replace('/', '-')

    def _get_cache_path(self, file_path: str) -> Path:
        """Return the full on-disk path of the cache entry for *file_path*."""
        return self.cache_dir / self._get_cache_filename(file_path)

    async def download_file(self, file_path: str, auth_token: str, timeout: float = 30.0) -> Optional[Path]:
        """Download a file from assets.grok.com and cache it on disk.

        Args:
            file_path: Asset path, e.g. /users/xxx/generated/xxx/file.jpg.
            auth_token: Auth cookie value used for the download request.
            timeout: Download timeout in seconds.

        Returns:
            Path of the cached file, or None if the download failed.
        """
        cache_path = self._get_cache_path(file_path)

        if cache_path.exists():
            logger.debug(f"[{self.cache_type.upper()}Cache] 文件已缓存: {cache_path}")
            return cache_path

        file_url = f"https://assets.grok.com{file_path}"

        try:
            # Build the Cookie header; cf_clearance is appended when configured.
            cf_clearance = setting.grok_config.get("cf_clearance", "")
            cookie = f"{auth_token};{cf_clearance}" if cf_clearance else auth_token

            # Browser-like headers so the asset host treats this request as a
            # normal top-level navigation.
            headers = {
                **get_dynamic_headers(pathname=file_path),
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "Sec-Fetch-Dest": "document",
                "Sec-Fetch-Mode": "navigate",
                "Sec-Fetch-Site": "same-site",
                "Sec-Fetch-User": "?1",
                "Upgrade-Insecure-Requests": "1",
                "Referer": "https://grok.com/",
                "Cookie": cookie
            }

            # Optional outbound proxy from global config.
            proxy_url = setting.grok_config.get("proxy_url")
            proxies = {"http": proxy_url, "https": proxy_url} if proxy_url else {}

            async with AsyncSession() as session:
                logger.debug(f"[{self.cache_type.upper()}Cache] 开始下载: {file_url}")
                response = await session.get(
                    file_url,
                    headers=headers,
                    proxies=proxies,
                    timeout=timeout,
                    allow_redirects=True,
                    impersonate="chrome133a"
                )
                response.raise_for_status()

                cache_path.write_bytes(response.content)
                logger.debug(f"[{self.cache_type.upper()}Cache] 文件已缓存: {cache_path} ({len(response.content)} bytes)")

                # Enforce the size limit in the background; keep a reference so
                # the task is not garbage-collected mid-flight.
                task = asyncio.create_task(self.cleanup_cache())
                self._cleanup_tasks.add(task)
                task.add_done_callback(self._cleanup_tasks.discard)

                return cache_path

        except Exception as e:
            logger.error(f"[{self.cache_type.upper()}Cache] 下载文件失败: {e}")
            return None

    def get_cached_file(self, file_path: str) -> Optional[Path]:
        """Return the cached path for *file_path*, or None when not cached.

        Args:
            file_path: Asset path.

        Returns:
            Cached file path, or None if absent.
        """
        cache_path = self._get_cache_path(file_path)
        return cache_path if cache_path.exists() else None

    async def cleanup_cache(self):
        """Evict least-recently-modified files until the cache fits its limit.

        The limit comes from the global config key
        '<cache_type>_cache_max_size_mb' (default 500 MB).
        """
        try:
            config_key = f"{self.cache_type}_cache_max_size_mb"
            max_size_mb = setting.global_config.get(config_key, 500)
            max_size_bytes = max_size_mb * 1024 * 1024

            # Collect (path, size, mtime) per regular file; stat() once per
            # file instead of twice.
            files = []
            total_size = 0
            for file_path in self.cache_dir.glob("*"):
                if file_path.is_file():
                    st = file_path.stat()
                    files.append((file_path, st.st_size, st.st_mtime))
                    total_size += st.st_size

            # Nothing to do while under the limit.
            if total_size <= max_size_bytes:
                logger.debug(f"[{self.cache_type.upper()}Cache] 缓存大小 {total_size / 1024 / 1024:.2f}MB,未超限")
                return

            logger.info(f"[{self.cache_type.upper()}Cache] 缓存大小 {total_size / 1024 / 1024:.2f}MB 超过限制 {max_size_mb}MB,开始清理")

            # Oldest modification time first (approximate LRU).
            files.sort(key=lambda item: item[2])

            # Delete until the total drops below the limit.
            for file_path, size, _ in files:
                if total_size <= max_size_bytes:
                    break
                file_path.unlink()
                total_size -= size
                logger.debug(f"[{self.cache_type.upper()}Cache] 已删除缓存文件: {file_path}")

            logger.info(f"[{self.cache_type.upper()}Cache] 缓存清理完成,当前大小 {total_size / 1024 / 1024:.2f}MB")

        except Exception as e:
            logger.error(f"[{self.cache_type.upper()}Cache] 清理缓存失败: {e}")
+
152
+
153
class ImageCacheService(CacheService):
    """Image cache service (files live under data/temp/image)."""

    def __init__(self):
        super().__init__("image")

    async def download_image(self, image_path: str, auth_token: str) -> Optional[Path]:
        """Download and cache an image (30 s timeout).

        Args:
            image_path: Image path, e.g. /users/xxx/generated/xxx/image.jpg.
            auth_token: Auth token used for the download.

        Returns:
            Cached file path, or None if the download failed.
        """
        return await self.download_file(image_path, auth_token, timeout=30.0)

    def get_cached_image(self, image_path: str) -> Optional[Path]:
        """Return the cached image path without downloading.

        Args:
            image_path: Image path.

        Returns:
            Cached file path, or None when not cached.
        """
        return self.get_cached_file(image_path)
181
+
182
+
183
class VideoCacheService(CacheService):
    """Video cache service (files live under data/temp/video)."""

    def __init__(self):
        super().__init__("video")

    async def download_video(self, video_path: str, auth_token: str) -> Optional[Path]:
        """Download and cache a video (60 s timeout).

        Args:
            video_path: Video path, e.g. /users/xxx/generated/xxx/video.mp4.
            auth_token: Auth token used for the download.

        Returns:
            Cached file path, or None if the download failed.
        """
        cached = await self.download_file(video_path, auth_token, timeout=60.0)
        return cached

    def get_cached_video(self, video_path: str) -> Optional[Path]:
        """Return the cached video path without downloading.

        Args:
            video_path: Video path.

        Returns:
            Cached file path, or None when not cached.
        """
        hit = self.get_cached_file(video_path)
        return hit
211
+
212
+
213
# Module-level singleton instances shared by the API layer.
image_cache_service = ImageCacheService()
video_cache_service = VideoCacheService()
216
+
app/services/grok/client.py CHANGED
@@ -3,6 +3,7 @@
3
  import asyncio
4
  import json
5
  from typing import Dict, List, Tuple, Any
 
6
  from curl_cffi import requests as curl_requests
7
 
8
  from app.core.config import setting
@@ -39,11 +40,24 @@ class GrokClient:
39
  auth_token = token_manager.get_token(model)
40
  model_name, model_mode = Models.to_grok(model)
41
 
 
 
 
 
 
 
 
 
42
  # 上传图片并获取附件ID列表
43
  image_attachments = await GrokClient._upload_imgs(image_urls, auth_token)
44
 
 
 
 
 
 
45
  # 构建Grok请求载荷
46
- payload = GrokClient._build_payload(content, model_name, model_mode, image_attachments)
47
 
48
  return await GrokClient._send_request(payload, auth_token, model, stream)
49
 
@@ -89,9 +103,9 @@ class GrokClient:
89
  return image_attachments
90
 
91
  @staticmethod
92
- def _build_payload(content: str, model_name: str, model_mode: str, image_attachments: List[str]) -> Dict[str, Any]:
93
  """构建Grok API请求载荷"""
94
- return {
95
  "temporary": setting.grok_config.get("temporary", True),
96
  "modelName": model_name,
97
  "message": content,
@@ -116,6 +130,13 @@ class GrokClient:
116
  "modelMode": model_mode,
117
  "isAsyncChat": False
118
  }
 
 
 
 
 
 
 
119
 
120
  @staticmethod
121
  async def _send_request(payload: dict, auth_token: str, model: str, stream: bool):
@@ -127,18 +148,23 @@ class GrokClient:
127
  try:
128
  # 构建请求头和代理
129
  headers = GrokClient._build_headers(auth_token)
130
- proxies = GrokClient._get_proxy()
 
 
 
 
 
 
 
 
 
 
131
 
132
  # 在线程池中执行同步HTTP请求,避免阻塞事件循环
133
  response = await asyncio.to_thread(
134
  curl_requests.post,
135
  GROK_API_ENDPOINT,
136
- headers=headers,
137
- data=json.dumps(payload),
138
- impersonate=IMPERSONATE_BROWSER,
139
- timeout=REQUEST_TIMEOUT,
140
- stream=True,
141
- **proxies
142
  )
143
 
144
  logger.debug(f"[Client] API响应状态码: {response.status_code}")
@@ -154,9 +180,14 @@ class GrokClient:
154
  return await GrokClient._process_response(response, auth_token, model, stream)
155
 
156
  except curl_requests.RequestsError as e:
 
157
  raise GrokApiException(f"网络错误: {e}", "NETWORK_ERROR") from e
158
  except json.JSONDecodeError as e:
 
159
  raise GrokApiException(f"JSON解析错误: {e}", "JSON_ERROR") from e
 
 
 
160
 
161
  @staticmethod
162
  def _build_headers(auth_token: str) -> Dict[str, str]:
@@ -183,9 +214,9 @@ class GrokClient:
183
  try:
184
  error_data = response.json()
185
  error_message = str(error_data)
186
- except Exception:
187
  error_data = response.text
188
- error_message = error_data[:200] if error_data else "未知错误"
189
 
190
  # 记录Token失败
191
  asyncio.create_task(token_manager.record_failure(auth_token, response.status_code, error_message))
@@ -203,7 +234,7 @@ class GrokClient:
203
  result = GrokResponseProcessor.process_stream(response, auth_token)
204
  asyncio.create_task(GrokClient._update_rate_limits(auth_token, model))
205
  else:
206
- result = await GrokResponseProcessor.process_normal(response, auth_token)
207
  asyncio.create_task(GrokClient._update_rate_limits(auth_token, model))
208
 
209
  return result
 
3
  import asyncio
4
  import json
5
  from typing import Dict, List, Tuple, Any
6
+
7
  from curl_cffi import requests as curl_requests
8
 
9
  from app.core.config import setting
 
40
  auth_token = token_manager.get_token(model)
41
  model_name, model_mode = Models.to_grok(model)
42
 
43
+ # 检查是否为视频模型
44
+ is_video_model = Models.get_model_info(model).get("is_video_model", False)
45
+
46
+ # 视频模型特殊处理:只允许一张图片
47
+ if is_video_model and len(image_urls) > 1:
48
+ logger.warning(f"[Client] 视频模型只允许一张图片,当前有{len(image_urls)}张,只使用第一张")
49
+ image_urls = image_urls[:1]
50
+
51
  # 上传图片并获取附件ID列表
52
  image_attachments = await GrokClient._upload_imgs(image_urls, auth_token)
53
 
54
+ # 视频模型:文本添加 --mode=custom
55
+ if is_video_model:
56
+ content = f"{content} --mode=custom"
57
+ logger.debug(f"[Client] 视频模型文本处理: {content}")
58
+
59
  # 构建Grok请求载荷
60
+ payload = GrokClient._build_payload(content, model_name, model_mode, image_attachments, is_video_model)
61
 
62
  return await GrokClient._send_request(payload, auth_token, model, stream)
63
 
 
103
  return image_attachments
104
 
105
  @staticmethod
106
+ def _build_payload(content: str, model_name: str, model_mode: str, image_attachments: List[str], is_video_model: bool = False) -> Dict[str, Any]:
107
  """构建Grok API请求载荷"""
108
+ payload = {
109
  "temporary": setting.grok_config.get("temporary", True),
110
  "modelName": model_name,
111
  "message": content,
 
130
  "modelMode": model_mode,
131
  "isAsyncChat": False
132
  }
133
+
134
+ # 视频模型特殊配置
135
+ if is_video_model:
136
+ payload["toolOverrides"] = {"videoGen": True}
137
+ logger.debug("[Client] 视频模型载荷配置: toolOverrides.videoGen = True")
138
+
139
+ return payload
140
 
141
  @staticmethod
142
  async def _send_request(payload: dict, auth_token: str, model: str, stream: bool):
 
148
  try:
149
  # 构建请求头和代理
150
  headers = GrokClient._build_headers(auth_token)
151
+ proxy_config = GrokClient._get_proxy()
152
+
153
+ # 构建请求参数
154
+ request_kwargs = {
155
+ "headers": headers,
156
+ "data": json.dumps(payload),
157
+ "impersonate": IMPERSONATE_BROWSER,
158
+ "timeout": REQUEST_TIMEOUT,
159
+ "stream": True,
160
+ "proxies": proxy_config if proxy_config else None
161
+ }
162
 
163
  # 在线程池中执行同步HTTP请求,避免阻塞事件循环
164
  response = await asyncio.to_thread(
165
  curl_requests.post,
166
  GROK_API_ENDPOINT,
167
+ **request_kwargs
 
 
 
 
 
168
  )
169
 
170
  logger.debug(f"[Client] API响应状态码: {response.status_code}")
 
180
  return await GrokClient._process_response(response, auth_token, model, stream)
181
 
182
  except curl_requests.RequestsError as e:
183
+ logger.error(f"[Client] 网络请求错误: {e}")
184
  raise GrokApiException(f"网络错误: {e}", "NETWORK_ERROR") from e
185
  except json.JSONDecodeError as e:
186
+ logger.error(f"[Client] JSON解析错误: {e}")
187
  raise GrokApiException(f"JSON解析错误: {e}", "JSON_ERROR") from e
188
+ except Exception as e:
189
+ logger.error(f"[Client] 未知请求错误: {type(e).__name__}: {e}")
190
+ raise GrokApiException(f"请求处理错误: {e}", "REQUEST_ERROR") from e
191
 
192
  @staticmethod
193
  def _build_headers(auth_token: str) -> Dict[str, str]:
 
214
  try:
215
  error_data = response.json()
216
  error_message = str(error_data)
217
+ except Exception as e:
218
  error_data = response.text
219
+ error_message = error_data[:200] if error_data else e
220
 
221
  # 记录Token失败
222
  asyncio.create_task(token_manager.record_failure(auth_token, response.status_code, error_message))
 
234
  result = GrokResponseProcessor.process_stream(response, auth_token)
235
  asyncio.create_task(GrokClient._update_rate_limits(auth_token, model))
236
  else:
237
+ result = await GrokResponseProcessor.process_normal(response, auth_token, model)
238
  asyncio.create_task(GrokClient._update_rate_limits(auth_token, model))
239
 
240
  return result
app/services/grok/processer.py CHANGED
@@ -3,7 +3,8 @@
3
  import json
4
  import uuid
5
  import time
6
- from typing import Iterator
 
7
 
8
  from app.core.config import setting
9
  from app.core.exception import GrokApiException
@@ -16,15 +17,70 @@ from app.models.openai_schema import (
16
  OpenAIChatCompletionChunkChoice,
17
  OpenAIChatCompletionChunkMessage
18
  )
19
- from app.services.grok.image_cache import image_cache_service
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
 
22
  class GrokResponseProcessor:
23
  """Grok API 响应处理器"""
24
 
25
  @staticmethod
26
- async def process_normal(response, auth_token: str) -> OpenAIChatCompletionResponse:
27
  """处理非流式响应"""
 
28
  try:
29
  for chunk in response.iter_lines():
30
  if not chunk:
@@ -40,8 +96,51 @@ class GrokResponseProcessor:
40
  {"code": error.get("code")}
41
  )
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  # 提取模型响应
44
- model_response = data.get("result", {}).get("response", {}).get("modelResponse")
45
  if not model_response:
46
  continue
47
 
@@ -53,10 +152,10 @@ class GrokResponseProcessor:
53
  )
54
 
55
  # 构建响应内容
56
- model = model_response.get("model")
57
  content = model_response.get("message", "")
58
 
59
- # 添加生成的图片
60
  if images := model_response.get("generatedImageUrls"):
61
  for img in images:
62
  try:
@@ -72,12 +171,12 @@ class GrokResponseProcessor:
72
  logger.warning(f"[Processor] 缓存图片失败: {e}")
73
  content += f"\n![Generated Image](https://assets.grok.com/{img})"
74
 
75
- # 返回OpenAI格式响应
76
  result = OpenAIChatCompletionResponse(
77
  id=f"chatcmpl-{uuid.uuid4()}",
78
  object="chat.completion",
79
  created=int(time.time()),
80
- model=model,
81
  choices=[OpenAIChatCompletionChoice(
82
  index=0,
83
  message=OpenAIChatCompletionMessage(
@@ -88,29 +187,48 @@ class GrokResponseProcessor:
88
  )],
89
  usage=None
90
  )
 
91
  response.close()
92
  return result
93
 
94
  raise GrokApiException("无响应数据", "NO_RESPONSE")
95
 
96
  except json.JSONDecodeError as e:
 
97
  raise GrokApiException(f"JSON解析失败: {e}", "JSON_ERROR") from e
 
 
 
98
  finally:
99
- # 确保响应对象被关闭
100
- if hasattr(response, 'close'):
101
- response.close()
 
 
 
102
 
103
  @staticmethod
104
- async def process_stream(response, auth_token: str) -> Iterator[str]:
105
  """处理流式响应"""
 
106
  is_image = False
107
  is_thinking = False
108
  thinking_finished = False
109
  chunk_index = 0
110
  model = None
111
  filtered_tags = setting.grok_config.get("filtered_tags", "").split(",")
112
-
113
- def make_chunk(content: str, finish: str = None):
 
 
 
 
 
 
 
 
 
 
114
  """生成OpenAI格式的响应块"""
115
  chunk_data = OpenAIChatCompletionChunkResponse(
116
  id=f"chatcmpl-{uuid.uuid4()}",
@@ -120,8 +238,8 @@ class GrokResponseProcessor:
120
  index=chunk_index,
121
  delta=OpenAIChatCompletionChunkMessage(
122
  role="assistant",
123
- content=content
124
- ) if content else {},
125
  finish_reason=finish
126
  )]
127
  ).model_dump()
@@ -130,6 +248,14 @@ class GrokResponseProcessor:
130
 
131
  try:
132
  for chunk in response.iter_lines():
 
 
 
 
 
 
 
 
133
  logger.debug(f"[Processor] 接收到数据块: {len(chunk)} bytes")
134
  if not chunk:
135
  continue
@@ -156,6 +282,47 @@ class GrokResponseProcessor:
156
  if m := user_resp.get("model"):
157
  model = m
158
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  # 检查生成模式
160
  if grok_resp.get("imageAttachmentInfo"):
161
  is_image = True
@@ -163,16 +330,16 @@ class GrokResponseProcessor:
163
  # 获取token
164
  token = grok_resp.get("token", "")
165
 
166
- # 图片模式
167
  if is_image:
168
  if model_resp := grok_resp.get("modelResponse"):
169
  # 生成图片链接并缓存
170
  content = ""
171
  for img in model_resp.get("generatedImageUrls", []):
172
  try:
173
- # 异步下载并缓存图片
174
  await image_cache_service.download_image(f"/{img}", auth_token)
175
- # 使用本地缓存路径
176
  img_path = img.replace('/', '-')
177
  base_url = setting.global_config.get("base_url", "")
178
  img_url = f"{base_url}/images/{img_path}" if base_url else f"/images/{img_path}"
@@ -181,12 +348,14 @@ class GrokResponseProcessor:
181
  logger.warning(f"[Processor] 缓存图片失败: {e}")
182
  content += f"![Generated Image](https://assets.grok.com/{img})\n"
183
  yield make_chunk(content.strip(), "stop")
 
184
  return
185
  elif token:
186
  yield make_chunk(token)
 
187
  chunk_index += 1
188
 
189
- # 对话模式
190
  else:
191
  # 过滤 list 格式的 token
192
  if isinstance(token, list):
@@ -200,7 +369,7 @@ class GrokResponseProcessor:
200
  current_is_thinking = grok_resp.get("isThinking", False)
201
  message_tag = grok_resp.get("messageTag")
202
 
203
- # 跳过后续的 thinking
204
  if thinking_finished and current_is_thinking:
205
  continue
206
 
@@ -208,7 +377,7 @@ class GrokResponseProcessor:
208
  if grok_resp.get("toolUsageCardId"):
209
  if web_search := grok_resp.get("webSearchResults"):
210
  if current_is_thinking:
211
- # 添加搜索结果到 token
212
  for result in web_search.get("results", []):
213
  title = result.get("title", "")
214
  url = result.get("url", "")
@@ -238,6 +407,7 @@ class GrokResponseProcessor:
238
  thinking_finished = True
239
 
240
  yield make_chunk(content)
 
241
  chunk_index += 1
242
  is_thinking = current_is_thinking
243
 
@@ -250,11 +420,23 @@ class GrokResponseProcessor:
250
 
251
  # 发送结束块
252
  yield make_chunk("", "stop")
 
253
  # 发送流结束标记
254
  yield "data: [DONE]\n\n"
 
 
 
255
 
256
  except Exception as e:
257
  logger.error(f"[Processor] 流式处理严重错误: {e}")
258
  yield make_chunk(f"处理错误: {e}", "error")
259
  # 发送流结束标记
260
- yield "data: [DONE]\n\n"
 
 
 
 
 
 
 
 
 
3
  import json
4
  import uuid
5
  import time
6
+ import asyncio
7
+ from typing import AsyncGenerator
8
 
9
  from app.core.config import setting
10
  from app.core.exception import GrokApiException
 
17
  OpenAIChatCompletionChunkChoice,
18
  OpenAIChatCompletionChunkMessage
19
  )
20
+ from app.services.grok.cache import image_cache_service, video_cache_service
21
+
22
+
23
+ class StreamTimeoutManager:
24
+ """流式响应超时管理器"""
25
+
26
+ def __init__(self, chunk_timeout: int = 120, first_response_timeout: int = 30, total_timeout: int = 600):
27
+ """初始化超时管理器
28
+
29
+ Args:
30
+ chunk_timeout: 数据块间隔超时(秒)
31
+ first_response_timeout: 首次响应超时(秒)
32
+ total_timeout: 总超时限制(秒,0表示不限制)
33
+ """
34
+ self.chunk_timeout = chunk_timeout
35
+ self.first_response_timeout = first_response_timeout
36
+ self.total_timeout = total_timeout
37
+
38
+ self.start_time = asyncio.get_event_loop().time()
39
+ self.last_chunk_time = self.start_time
40
+ self.first_chunk_received = False
41
+
42
+ def check_timeout(self) -> tuple[bool, str]:
43
+ """检查是否超时
44
+
45
+ Returns:
46
+ (is_timeout, timeout_message): 是否超时及超时信息
47
+ """
48
+ current_time = asyncio.get_event_loop().time()
49
+
50
+ # 检查首次响应超时
51
+ if not self.first_chunk_received:
52
+ if current_time - self.start_time > self.first_response_timeout:
53
+ return True, f"首次响应超时 ({self.first_response_timeout}秒未收到首个数据块)"
54
+
55
+ # 检查总超时
56
+ if self.total_timeout > 0:
57
+ if current_time - self.start_time > self.total_timeout:
58
+ return True, f"流式响应总超时 ({self.total_timeout}秒)"
59
+
60
+ # 检查数据块间隔超时
61
+ if self.first_chunk_received:
62
+ if current_time - self.last_chunk_time > self.chunk_timeout:
63
+ return True, f"数据块间隔超时 ({self.chunk_timeout}秒无新数据)"
64
+
65
+ return False, ""
66
+
67
+ def mark_chunk_received(self):
68
+ """标记收到数据块"""
69
+ self.last_chunk_time = asyncio.get_event_loop().time()
70
+ self.first_chunk_received = True
71
+
72
+ def get_total_duration(self) -> float:
73
+ """获取总耗时(秒)"""
74
+ return asyncio.get_event_loop().time() - self.start_time
75
 
76
 
77
  class GrokResponseProcessor:
78
  """Grok API 响应处理器"""
79
 
80
  @staticmethod
81
+ async def process_normal(response, auth_token: str, model: str = None) -> OpenAIChatCompletionResponse:
82
  """处理非流式响应"""
83
+ response_closed = False
84
  try:
85
  for chunk in response.iter_lines():
86
  if not chunk:
 
96
  {"code": error.get("code")}
97
  )
98
 
99
+ # 提取响应数据
100
+ grok_resp = data.get("result", {}).get("response", {})
101
+
102
+ # 提取视频数据
103
+ if video_resp := grok_resp.get("streamingVideoGenerationResponse"):
104
+ if video_url := video_resp.get("videoUrl"):
105
+ logger.debug(f"[Processor] 检测到视频生成: {video_url}")
106
+ full_video_url = f"https://assets.grok.com/{video_url}"
107
+
108
+ # 下载并缓存视频
109
+ try:
110
+ cache_path = await video_cache_service.download_video(f"/{video_url}", auth_token)
111
+ if cache_path:
112
+ video_path = video_url.replace('/', '-')
113
+ base_url = setting.global_config.get("base_url", "")
114
+ local_video_url = f"{base_url}/images/{video_path}" if base_url else f"/images/{video_path}"
115
+ content = f'<video src="{local_video_url}" controls="controls" width="500" height="300"></video>'
116
+ else:
117
+ content = f'<video src="{full_video_url}" controls="controls" width="500" height="300"></video>'
118
+ except Exception as e:
119
+ logger.warning(f"[Processor] 缓存视频失败: {e}")
120
+ content = f'<video src="{full_video_url}" controls="controls" width="500" height="300"></video>'
121
+
122
+ # 返回视频响应
123
+ result = OpenAIChatCompletionResponse(
124
+ id=f"chatcmpl-{uuid.uuid4()}",
125
+ object="chat.completion",
126
+ created=int(time.time()),
127
+ model=model or "grok-imagine-0.9",
128
+ choices=[OpenAIChatCompletionChoice(
129
+ index=0,
130
+ message=OpenAIChatCompletionMessage(
131
+ role="assistant",
132
+ content=content
133
+ ),
134
+ finish_reason="stop"
135
+ )],
136
+ usage=None
137
+ )
138
+ response_closed = True
139
+ response.close()
140
+ return result
141
+
142
  # 提取模型响应
143
+ model_response = grok_resp.get("modelResponse")
144
  if not model_response:
145
  continue
146
 
 
152
  )
153
 
154
  # 构建响应内容
155
+ model_name = model_response.get("model")
156
  content = model_response.get("message", "")
157
 
158
+ # 提取图片数据
159
  if images := model_response.get("generatedImageUrls"):
160
  for img in images:
161
  try:
 
171
  logger.warning(f"[Processor] 缓存图片失败: {e}")
172
  content += f"\n![Generated Image](https://assets.grok.com/{img})"
173
 
174
+ # 返回 OpenAI 响应格式
175
  result = OpenAIChatCompletionResponse(
176
  id=f"chatcmpl-{uuid.uuid4()}",
177
  object="chat.completion",
178
  created=int(time.time()),
179
+ model=model_name,
180
  choices=[OpenAIChatCompletionChoice(
181
  index=0,
182
  message=OpenAIChatCompletionMessage(
 
187
  )],
188
  usage=None
189
  )
190
+ response_closed = True
191
  response.close()
192
  return result
193
 
194
  raise GrokApiException("无响应数据", "NO_RESPONSE")
195
 
196
  except json.JSONDecodeError as e:
197
+ logger.error(f"[Processor] JSON解析失败: {e}")
198
  raise GrokApiException(f"JSON解析失败: {e}", "JSON_ERROR") from e
199
+ except Exception as e:
200
+ logger.error(f"[Processor] 处理响应时发生未知错误: {type(e).__name__}: {e}")
201
+ raise GrokApiException(f"响应处理错误: {e}", "PROCESS_ERROR") from e
202
  finally:
203
+ # 确保响应对象被关闭,避免双重释放
204
+ if not response_closed and hasattr(response, 'close'):
205
+ try:
206
+ response.close()
207
+ except Exception as e:
208
+ logger.warning(f"[Processor] 关闭响应对象时出错: {e}")
209
 
210
  @staticmethod
211
+ async def process_stream(response, auth_token: str) -> AsyncGenerator[str, None]:
212
  """处理流式响应"""
213
+ # 流式生成状态
214
  is_image = False
215
  is_thinking = False
216
  thinking_finished = False
217
  chunk_index = 0
218
  model = None
219
  filtered_tags = setting.grok_config.get("filtered_tags", "").split(",")
220
+ video_progress_started = False
221
+ last_video_progress = -1
222
+ response_closed = False
223
+
224
+ # 初始化超时管理器
225
+ timeout_manager = StreamTimeoutManager(
226
+ chunk_timeout=setting.grok_config.get("stream_chunk_timeout", 120),
227
+ first_response_timeout=setting.grok_config.get("stream_first_response_timeout", 30),
228
+ total_timeout=setting.grok_config.get("stream_total_timeout", 600)
229
+ )
230
+
231
+ def make_chunk(chunk_content: str, finish: str = None):
232
  """生成OpenAI格式的响应块"""
233
  chunk_data = OpenAIChatCompletionChunkResponse(
234
  id=f"chatcmpl-{uuid.uuid4()}",
 
238
  index=chunk_index,
239
  delta=OpenAIChatCompletionChunkMessage(
240
  role="assistant",
241
+ content=chunk_content
242
+ ) if chunk_content else {},
243
  finish_reason=finish
244
  )]
245
  ).model_dump()
 
248
 
249
  try:
250
  for chunk in response.iter_lines():
251
+ # 超时检查
252
+ is_timeout, timeout_msg = timeout_manager.check_timeout()
253
+ if is_timeout:
254
+ logger.warning(f"[Processor] {timeout_msg}")
255
+ yield make_chunk("", "stop")
256
+ yield "data: [DONE]\n\n"
257
+ return
258
+
259
  logger.debug(f"[Processor] 接收到数据块: {len(chunk)} bytes")
260
  if not chunk:
261
  continue
 
282
  if m := user_resp.get("model"):
283
  model = m
284
 
285
+ # 提取视频数据
286
+ if video_resp := grok_resp.get("streamingVideoGenerationResponse"):
287
+ progress = video_resp.get("progress", 0)
288
+
289
+ if progress > last_video_progress:
290
+ last_video_progress = progress
291
+
292
+ # 添加 <think> 标签
293
+ if not video_progress_started:
294
+ content = f"<think>视频已生成{progress}%\n"
295
+ video_progress_started = True
296
+ elif progress < 100:
297
+ content = f"视频已生成{progress}%\n"
298
+ else:
299
+ # 进度100%时关闭 <think> 标签并立即处理视频
300
+ content = f"视频已生成{progress}%</think>\n"
301
+
302
+ # 立即下载并缓存视频
303
+ if v_url := video_resp.get("videoUrl"):
304
+ logger.debug(f"[Processor] 视频生成完成: {v_url}")
305
+ full_video_url = f"https://assets.grok.com/{v_url}"
306
+
307
+ try:
308
+ cache_path = await video_cache_service.download_video(f"/{v_url}", auth_token)
309
+ if cache_path:
310
+ video_path = v_url.replace('/', '-')
311
+ base_url = setting.global_config.get("base_url", "")
312
+ local_video_url = f"{base_url}/images/{video_path}" if base_url else f"/images/{video_path}"
313
+ content += f'<video src="{local_video_url}" controls="controls"></video>'
314
+ else:
315
+ content += f'<video src="{full_video_url}" controls="controls"></video>'
316
+ except Exception as e:
317
+ logger.warning(f"[Processor] 缓存视频失败: {e}")
318
+ content += f'<video src="{full_video_url}" controls="controls"></video>'
319
+
320
+ yield make_chunk(content)
321
+ timeout_manager.mark_chunk_received()
322
+ chunk_index += 1
323
+
324
+ continue
325
+
326
  # 检查生成模式
327
  if grok_resp.get("imageAttachmentInfo"):
328
  is_image = True
 
330
  # 获取token
331
  token = grok_resp.get("token", "")
332
 
333
+ # 提取图片数据
334
  if is_image:
335
  if model_resp := grok_resp.get("modelResponse"):
336
  # 生成图片链接并缓存
337
  content = ""
338
  for img in model_resp.get("generatedImageUrls", []):
339
  try:
340
+ # 缓存图片
341
  await image_cache_service.download_image(f"/{img}", auth_token)
342
+ # 本地图片路径
343
  img_path = img.replace('/', '-')
344
  base_url = setting.global_config.get("base_url", "")
345
  img_url = f"{base_url}/images/{img_path}" if base_url else f"/images/{img_path}"
 
348
  logger.warning(f"[Processor] 缓存图片失败: {e}")
349
  content += f"![Generated Image](https://assets.grok.com/{img})\n"
350
  yield make_chunk(content.strip(), "stop")
351
+ timeout_manager.mark_chunk_received()
352
  return
353
  elif token:
354
  yield make_chunk(token)
355
+ timeout_manager.mark_chunk_received()
356
  chunk_index += 1
357
 
358
+ # 提取对话数据
359
  else:
360
  # 过滤 list 格式的 token
361
  if isinstance(token, list):
 
369
  current_is_thinking = grok_resp.get("isThinking", False)
370
  message_tag = grok_resp.get("messageTag")
371
 
372
+ # 跳过后续的 <think> 标签
373
  if thinking_finished and current_is_thinking:
374
  continue
375
 
 
377
  if grok_resp.get("toolUsageCardId"):
378
  if web_search := grok_resp.get("webSearchResults"):
379
  if current_is_thinking:
380
+ # 封装搜索结果
381
  for result in web_search.get("results", []):
382
  title = result.get("title", "")
383
  url = result.get("url", "")
 
407
  thinking_finished = True
408
 
409
  yield make_chunk(content)
410
+ timeout_manager.mark_chunk_received()
411
  chunk_index += 1
412
  is_thinking = current_is_thinking
413
 
 
420
 
421
  # 发送结束块
422
  yield make_chunk("", "stop")
423
+
424
  # 发送流结束标记
425
  yield "data: [DONE]\n\n"
426
+
427
+ # 记录流式响应统计
428
+ logger.info(f"[Processor] 流式响应完成,总耗时: {timeout_manager.get_total_duration():.2f}秒")
429
 
430
  except Exception as e:
431
  logger.error(f"[Processor] 流式处理严重错误: {e}")
432
  yield make_chunk(f"处理错误: {e}", "error")
433
  # 发送流结束标记
434
+ yield "data: [DONE]\n\n"
435
+ finally:
436
+ # 确保响应对象被关闭
437
+ if not response_closed and hasattr(response, 'close'):
438
+ try:
439
+ response.close()
440
+ logger.debug("[Processor] 流式响应对象已关闭")
441
+ except Exception as e:
442
+ logger.warning(f"[Processor] 关闭流式响应对象时出错: {e}")
app/template/admin.html CHANGED
@@ -304,27 +304,58 @@
304
  </div>
305
  <div>
306
  <label class="text-sm font-medium flex items-center gap-1 mb-2">
307
- 缓存上限 (MB)
308
  <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="图片缓存目录的最大容量,超过后自动删除最旧的缓存文件">?</span>
309
  </label>
310
- <input id="cfgTempMaxSize" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="500">
 
 
 
 
 
 
 
311
  </div>
312
  <div>
313
  <label class="text-sm font-medium flex items-center gap-1 mb-2">
314
  缓存大小
315
  <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="当前缓存目录的大小">?</span>
316
  </label>
317
- <div class="flex gap-2">
318
- <input id="cacheSize" readonly class="flex h-9 flex-1 rounded-md border border-input bg-muted px-3 py-2 text-sm" placeholder="0 MB">
319
- <button onclick="clearCache()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-orange-600 text-white hover:bg-orange-700 h-9 px-3 transition-colors">
320
- <svg class="h-4 w-4 mr-1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
321
- <path d="M3 6h18"/>
322
- <path d="M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"/>
323
- <line x1="10" y1="11" x2="10" y2="17"/>
324
- <line x1="14" y1="11" x2="14" y2="17"/>
325
- </svg>
326
- 清除
327
- </button>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  </div>
329
  </div>
330
  <div>
@@ -389,6 +420,27 @@
389
  <option value="true">开启</option>
390
  </select>
391
  </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392
  </div>
393
  <div class="mt-6 flex justify-end">
394
  <button onclick="saveGrokSettings()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-primary text-primary-foreground hover:bg-primary/90 h-9 px-6 transition-colors">保存配置</button>
@@ -482,11 +534,13 @@
482
  showToast=(m,t='info')=>{const d=document.createElement('div'),bc={success:'bg-green-600',error:'bg-destructive',info:'bg-primary'};d.className=`fixed bottom-4 right-4 ${bc[t]||bc.info} text-white px-4 py-2.5 rounded-lg shadow-lg text-sm font-medium z-50 animate-slide-up`;d.textContent=m;document.body.appendChild(d);setTimeout(()=>{d.style.opacity='0';d.style.transition='opacity 0.3s';setTimeout(()=>d.parentNode&&document.body.removeChild(d),300)},2000)},
483
  logout=async()=>{if(!confirm('确定要退出登录吗?'))return;try{await apiRequest('/api/logout',{method:'POST'})}catch(e){console.error('登出失败:',e)}finally{localStorage.removeItem('adminToken');location.href='/login'}},
484
  switchTab=t=>{['tokens','settings'].forEach(n=>{$(`panel${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'remove':'add']('hidden');$(`tab${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'add':'remove']('border-primary','text-primary');$(`tab${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'remove':'add']('border-transparent','text-muted-foreground')});t==='settings'&&loadSettings()},
485
- loadSettings=async()=>{try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(d.success){const g=d.data.global,k=d.data.grok;$('cfgAdminUser').value=g.admin_username||'';$('cfgAdminPass').value='';$('cfgLogLevel').value=g.log_level||'DEBUG';$('cfgTempMaxSize').value=g.temp_max_size_mb||500;$('cfgBaseUrl').value=g.base_url||'';$('cfgApiKey').value=k.api_key||'';$('cfgProxyUrl').value=k.proxy_url||'';$('cfgCfClearance').value=k.cf_clearance||'';$('cfgStatsigId').value=k.x_statsig_id||'';$('cfgFilteredTags').value=k.filtered_tags||'';$('cfgTemporary').value=k.temporary!==false?'true':'false';await loadCacheSize()}}catch(e){console.error('加载配置失败:',e);showToast('加载配置失败','error')}},
486
- loadCacheSize=async()=>{try{const r=await apiRequest('/api/cache/size');if(!r)return;const d=await r.json();if(d.success){$('cacheSize').value=d.data.size||'0 MB'}}catch(e){console.error('加载缓存大小失败:',e);$('cacheSize').value='0 MB'}},
 
 
487
  clearCache=async()=>{if(!confirm('确定要清理缓存吗?此操作将删除 /data/temp 目录中的所有文件!'))return;try{const r=await apiRequest('/api/cache/clear',{method:'POST'});if(!r)return;const d=await r.json();if(d.success){showToast(`缓存清理完成,已删除 ${d.data.deleted_count||0} 个文件`,'success');await loadCacheSize()}else{showToast('清理失败: '+(d.error||'未知错误'),'error')}}catch(e){showToast('清理失败: '+e.message,'error')}},
488
- saveGlobalSettings=async()=>{const gc={admin_username:$('cfgAdminUser').value,log_level:$('cfgLogLevel').value,temp_max_size_mb:parseInt($('cfgTempMaxSize').value)||500,base_url:$('cfgBaseUrl').value};if($('cfgAdminPass').value)gc.admin_password=$('cfgAdminPass').value;try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(!d.success)return showToast('加载配置失败','error');const s=await apiRequest('/api/settings',{method:'POST',body:JSON.stringify({global_config:gc,grok_config:d.data.grok})});if(!s)return;const sd=await s.json();sd.success?(showToast('全局配置保存成功','success'),$('cfgAdminPass').value=''):showToast('保存失败: '+(sd.error||'未知错误'),'error')}catch(e){showToast('保存失败: '+e.message,'error')}},
489
- saveGrokSettings=async()=>{const kc={api_key:$('cfgApiKey').value,proxy_url:$('cfgProxyUrl').value,cf_clearance:$('cfgCfClearance').value,x_statsig_id:$('cfgStatsigId').value,filtered_tags:$('cfgFilteredTags').value,temporary:$('cfgTemporary').value==='true'};try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(!d.success)return showToast('加载配置失败','error');const s=await apiRequest('/api/settings',{method:'POST',body:JSON.stringify({global_config:d.data.global,grok_config:kc})});if(!s)return;const sd=await s.json();sd.success?showToast('Grok配置保存成功','success'):showToast('保存失败: '+(sd.error||'未知错误'),'error')}catch(e){showToast('保存失败: '+e.message,'error')}};
490
  window.addEventListener('DOMContentLoaded',()=>{checkAuth();refreshTokens();setInterval(()=>{loadStats();updateRemaining()},30000)});
491
  </script>
492
  </body>
 
304
  </div>
305
  <div>
306
  <label class="text-sm font-medium flex items-center gap-1 mb-2">
307
+ 图片缓存上限 (MB)
308
  <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="图片缓存目录的最大容量,超过后自动删除最旧的缓存文件">?</span>
309
  </label>
310
+ <input id="cfgImageCacheMaxSize" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="500">
311
+ </div>
312
+ <div>
313
+ <label class="text-sm font-medium flex items-center gap-1 mb-2">
314
+ 视频缓存上限 (MB)
315
+ <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="视频缓存目录的最大容量,超过后自动删除最旧的缓存文件">?</span>
316
+ </label>
317
+ <input id="cfgVideoCacheMaxSize" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="1000">
318
  </div>
319
  <div>
320
  <label class="text-sm font-medium flex items-center gap-1 mb-2">
321
  缓存大小
322
  <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="当前缓存目录的大小">?</span>
323
  </label>
324
+ <div class="space-y-2">
325
+ <div class="flex gap-2 items-center">
326
+ <span class="text-sm text-muted-foreground w-12">图片:</span>
327
+ <input id="imageCacheSize" readonly class="flex h-9 flex-1 rounded-md border border-input bg-muted px-3 py-2 text-sm" placeholder="0 MB">
328
+ <button onclick="clearImageCache()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-destructive text-destructive-foreground hover:bg-destructive/90 h-9 px-3 transition-colors" title="仅清除图片缓存">
329
+ <svg class="h-4 w-4" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
330
+ <rect x="3" y="3" width="18" height="18" rx="2" ry="2"/>
331
+ <circle cx="9" cy="9" r="2"/>
332
+ <path d="M21 15l-3.086-3.086a2 2 0 0 0-2.828 0L6 21"/>
333
+ </svg>
334
+ </button>
335
+ </div>
336
+ <div class="flex gap-2 items-center">
337
+ <span class="text-sm text-muted-foreground w-12">视频:</span>
338
+ <input id="videoCacheSize" readonly class="flex h-9 flex-1 rounded-md border border-input bg-muted px-3 py-2 text-sm" placeholder="0 MB">
339
+ <button onclick="clearVideoCache()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-destructive text-destructive-foreground hover:bg-destructive/90 h-9 px-3 transition-colors" title="仅清除视频缓存">
340
+ <svg class="h-4 w-4" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
341
+ <polygon points="23 7 16 12 23 17 23 7"/>
342
+ <rect x="1" y="5" width="15" height="14" rx="2" ry="2"/>
343
+ </svg>
344
+ </button>
345
+ </div>
346
+ <div class="flex gap-2 items-center">
347
+ <span class="text-sm text-muted-foreground w-12">总计:</span>
348
+ <input id="totalCacheSize" readonly class="flex h-9 flex-1 rounded-md border border-input bg-muted px-3 py-2 text-sm font-medium" placeholder="0 MB">
349
+ <button onclick="clearCache()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-destructive text-destructive-foreground hover:bg-destructive/90 h-9 px-3 transition-colors" title="清除所有缓存(图片+视频)">
350
+ <svg class="h-4 w-4 mr-1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
351
+ <path d="M3 6h18"/>
352
+ <path d="M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"/>
353
+ <line x1="10" y1="11" x2="10" y2="17"/>
354
+ <line x1="14" y1="11" x2="14" y2="17"/>
355
+ </svg>
356
+ 清除全部
357
+ </button>
358
+ </div>
359
  </div>
360
  </div>
361
  <div>
 
420
  <option value="true">开启</option>
421
  </select>
422
  </div>
423
+ <div>
424
+ <label class="text-sm font-medium flex items-center gap-1 mb-2">
425
+ 数据块间隔超时 (秒)
426
+ <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="流式响应中,两次数据块之间的最大等待时间。超过此时间将结束会话">?</span>
427
+ </label>
428
+ <input id="cfgStreamChunkTimeout" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="120">
429
+ </div>
430
+ <div>
431
+ <label class="text-sm font-medium flex items-center gap-1 mb-2">
432
+ 首次响应超时 (秒)
433
+ <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="从请求开始到收到第一个数据块的最大等待时间。超过此时间将结束会话">?</span>
434
+ </label>
435
+ <input id="cfgStreamFirstResponseTimeout" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="30">
436
+ </div>
437
+ <div>
438
+ <label class="text-sm font-medium flex items-center gap-1 mb-2">
439
+ 总超时限制 (秒)
440
+ <span class="inline-flex items-center justify-center w-3.5 h-3.5 rounded-full border border-muted-foreground text-muted-foreground cursor-help" style="font-size:10px;line-height:1" title="流式响应的总时长限制。设置为0表示不限制总时长">?</span>
441
+ </label>
442
+ <input id="cfgStreamTotalTimeout" type="number" class="flex h-9 w-full rounded-md border border-input bg-background px-3 py-2 text-sm" placeholder="600">
443
+ </div>
444
  </div>
445
  <div class="mt-6 flex justify-end">
446
  <button onclick="saveGrokSettings()" class="inline-flex items-center justify-center rounded-md text-sm font-medium bg-primary text-primary-foreground hover:bg-primary/90 h-9 px-6 transition-colors">保存配置</button>
 
534
  showToast=(m,t='info')=>{const d=document.createElement('div'),bc={success:'bg-green-600',error:'bg-destructive',info:'bg-primary'};d.className=`fixed bottom-4 right-4 ${bc[t]||bc.info} text-white px-4 py-2.5 rounded-lg shadow-lg text-sm font-medium z-50 animate-slide-up`;d.textContent=m;document.body.appendChild(d);setTimeout(()=>{d.style.opacity='0';d.style.transition='opacity 0.3s';setTimeout(()=>d.parentNode&&document.body.removeChild(d),300)},2000)},
535
  logout=async()=>{if(!confirm('确定要退出登录吗?'))return;try{await apiRequest('/api/logout',{method:'POST'})}catch(e){console.error('登出失败:',e)}finally{localStorage.removeItem('adminToken');location.href='/login'}},
536
  switchTab=t=>{['tokens','settings'].forEach(n=>{$(`panel${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'remove':'add']('hidden');$(`tab${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'add':'remove']('border-primary','text-primary');$(`tab${n.charAt(0).toUpperCase()+n.slice(1)}`).classList[n===t?'remove':'add']('border-transparent','text-muted-foreground')});t==='settings'&&loadSettings()},
537
+ loadSettings=async()=>{try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(d.success){const g=d.data.global,k=d.data.grok;$('cfgAdminUser').value=g.admin_username||'';$('cfgAdminPass').value='';$('cfgLogLevel').value=g.log_level||'DEBUG';$('cfgImageCacheMaxSize').value=g.image_cache_max_size_mb||500;$('cfgVideoCacheMaxSize').value=g.video_cache_max_size_mb||1000;$('cfgBaseUrl').value=g.base_url||'';$('cfgApiKey').value=k.api_key||'';$('cfgProxyUrl').value=k.proxy_url||'';$('cfgCfClearance').value=k.cf_clearance||'';$('cfgStatsigId').value=k.x_statsig_id||'';$('cfgFilteredTags').value=k.filtered_tags||'';$('cfgTemporary').value=k.temporary!==false?'true':'false';$('cfgStreamChunkTimeout').value=k.stream_chunk_timeout||120;$('cfgStreamFirstResponseTimeout').value=k.stream_first_response_timeout||30;$('cfgStreamTotalTimeout').value=k.stream_total_timeout||600;await loadCacheSize()}}catch(e){console.error('加载配置失败:',e);showToast('加载配置失败','error')}},
538
+ loadCacheSize=async()=>{try{const r=await apiRequest('/api/cache/size');if(!r)return;const d=await r.json();if(d.success){$('imageCacheSize').value=d.data.image_size||'0 MB';$('videoCacheSize').value=d.data.video_size||'0 MB';$('totalCacheSize').value=d.data.total_size||'0 MB'}}catch(e){console.error('加载缓存大小失败:',e);$('imageCacheSize').value='0 MB';$('videoCacheSize').value='0 MB';$('totalCacheSize').value='0 MB'}},
539
+ clearImageCache=async()=>{if(!confirm('确定要清理图片缓存吗?此操作将删除所有图片缓存文件!'))return;try{const r=await apiRequest('/api/cache/clear/images',{method:'POST'});if(!r)return;const d=await r.json();if(d.success){showToast(`图片缓存清理完成,已删除 ${d.data.deleted_count||0} 个文件`,'success');await loadCacheSize()}else{showToast('清理失败: '+(d.error||'未知错误'),'error')}}catch(e){showToast('清理失败: '+e.message,'error')}},
540
+ clearVideoCache=async()=>{if(!confirm('确定要清理视频缓存吗?此操作将删除所有视频缓存文件!'))return;try{const r=await apiRequest('/api/cache/clear/videos',{method:'POST'});if(!r)return;const d=await r.json();if(d.success){showToast(`视频缓存清理完成,已删除 ${d.data.deleted_count||0} 个文件`,'success');await loadCacheSize()}else{showToast('清理失败: '+(d.error||'未知错误'),'error')}}catch(e){showToast('清理失败: '+e.message,'error')}},
541
  clearCache=async()=>{if(!confirm('确定要清理缓存吗?此操作将删除 /data/temp 目录中的所有文件!'))return;try{const r=await apiRequest('/api/cache/clear',{method:'POST'});if(!r)return;const d=await r.json();if(d.success){showToast(`缓存清理完成,已删除 ${d.data.deleted_count||0} 个文件`,'success');await loadCacheSize()}else{showToast('清理失败: '+(d.error||'未知错误'),'error')}}catch(e){showToast('清理失败: '+e.message,'error')}},
542
+ saveGlobalSettings=async()=>{const gc={admin_username:$('cfgAdminUser').value,log_level:$('cfgLogLevel').value,image_cache_max_size_mb:parseInt($('cfgImageCacheMaxSize').value)||500,video_cache_max_size_mb:parseInt($('cfgVideoCacheMaxSize').value)||1000,base_url:$('cfgBaseUrl').value};if($('cfgAdminPass').value)gc.admin_password=$('cfgAdminPass').value;try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(!d.success)return showToast('加载配置失败','error');const s=await apiRequest('/api/settings',{method:'POST',body:JSON.stringify({global_config:gc,grok_config:d.data.grok})});if(!s)return;const sd=await s.json();sd.success?(showToast('全局配置保存成功','success'),$('cfgAdminPass').value=''):showToast('保存失败: '+(sd.error||'未知错误'),'error')}catch(e){showToast('保存失败: '+e.message,'error')}},
543
+ saveGrokSettings=async()=>{const kc={api_key:$('cfgApiKey').value,proxy_url:$('cfgProxyUrl').value,cf_clearance:$('cfgCfClearance').value,x_statsig_id:$('cfgStatsigId').value,filtered_tags:$('cfgFilteredTags').value,temporary:$('cfgTemporary').value==='true',stream_chunk_timeout:parseInt($('cfgStreamChunkTimeout').value)||120,stream_first_response_timeout:parseInt($('cfgStreamFirstResponseTimeout').value)||30,stream_total_timeout:parseInt($('cfgStreamTotalTimeout').value)||600};try{const r=await apiRequest('/api/settings');if(!r)return;const d=await r.json();if(!d.success)return showToast('加载配置失败','error');const s=await apiRequest('/api/settings',{method:'POST',body:JSON.stringify({global_config:d.data.global,grok_config:kc})});if(!s)return;const sd=await s.json();sd.success?showToast('Grok配置保存成功','success'):showToast('保存失败: '+(sd.error||'未知错误'),'error')}catch(e){showToast('保存失败: '+e.message,'error')}};
544
  window.addEventListener('DOMContentLoaded',()=>{checkAuth();refreshTokens();setInterval(()=>{loadStats();updateRemaining()},30000)});
545
  </script>
546
  </body>
data/setting.toml CHANGED
@@ -2,12 +2,18 @@
2
  admin_username = "admin"
3
  admin_password = "admin"
4
  log_level = "INFO"
5
- temp_max_size_mb = 500
 
6
  base_url = ""
7
 
 
 
8
  [grok]
9
  api_key = ""
10
  proxy_url = ""
 
 
 
11
  cf_clearance = ""
12
  temporary = true
13
  filtered_tags = "xaiartifact,xai:tool_usage_card,grok:render"
 
2
  admin_username = "admin"
3
  admin_password = "admin"
4
  log_level = "INFO"
5
+ image_cache_max_size_mb = 500
6
+ video_cache_max_size_mb = 1000
7
  base_url = ""
8
 
9
+
10
+
11
  [grok]
12
  api_key = ""
13
  proxy_url = ""
14
+ stream_chunk_timeout = 120
15
+ stream_first_response_timeout = 30
16
+ stream_total_timeout = 600
17
  cf_clearance = ""
18
  temporary = true
19
  filtered_tags = "xaiartifact,xai:tool_usage_card,grok:render"