Upload 11 files
Browse files- Dockerfile +33 -0
- app.py +33 -0
- auth.py +112 -0
- client.py +468 -0
- config.py +400 -0
- requirements.txt +4 -0
- routes.py +1043 -0
- static/css/styles.css +698 -0
- static/js/scripts.js +457 -0
- templates/stats.html +240 -0
- utils.py +158 -0
Dockerfile
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use official Python base image
FROM python:3.9-slim

# Set working directory inside the container
WORKDIR /app

# Copy the requirements file first so the dependency-install layer is
# cached independently of application-code changes.
COPY requirements.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy core application files
COPY app.py .
COPY auth.py .
COPY client.py .
COPY routes.py .
COPY utils.py .
COPY config.py .
COPY retry.py .
COPY static/ static/
COPY templates/ templates/

RUN chmod -R 0755 /app

# Expose the application port.
# NOTE(review): app.py listens on $PORT with a default of 7860, not
# Flask's default 5000 — expose the matching port so container metadata
# is accurate.
EXPOSE 7860

# Force UTF-8 so non-ASCII (e.g. Chinese) log output is not garbled
ENV LANG=C.UTF-8

# Start the main program
CMD ["python", "app.py"]
|
app.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from flask import Flask
|
| 3 |
+
from utils import logger
|
| 4 |
+
import config
|
| 5 |
+
from auth import start_cleanup_thread
|
| 6 |
+
from routes import register_routes
|
| 7 |
+
|
| 8 |
+
def create_app():
    """Build and configure the Flask application instance."""
    # Configuration must be initialised before any component reads it.
    config.init_config()

    flask_app = Flask(__name__)

    # Spawn the background thread that evicts expired sessions.
    start_cleanup_thread()

    # Attach all HTTP endpoints to the application.
    register_routes(flask_app)

    return flask_app
|
| 20 |
+
|
| 21 |
+
if __name__ == "__main__":
    # Configuration initialisation happens inside create_app().
    app = create_app()

    # Resolve the listen port from the environment (default: 7860).
    port = int(os.getenv("PORT", 7860))
    print(f"[系统] Flask 应用将在 0.0.0.0:{port} 启动 (Flask 开发服)")

    # Run on Flask's built-in development server; debug mode is driven
    # by configuration rather than hard-coded.
    app.run(host='0.0.0.0', port=port,
            debug=config.get_config_value("FLASK_DEBUG", default=False))
|
auth.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import time
|
| 3 |
+
from datetime import datetime, timedelta
|
| 4 |
+
from functools import wraps
|
| 5 |
+
# from flask import request, jsonify # 移除冗余导入
|
| 6 |
+
from utils import logger
|
| 7 |
+
import config
|
| 8 |
+
|
| 9 |
+
class RateLimiter:
    """Sliding-window request rate limiter keyed by token/IP.

    Each identifier has its own list of request timestamps; a request is
    allowed only when fewer than ``limit`` requests were recorded within
    the last ``window_size`` seconds.
    """

    def __init__(self, limit_per_minute=None):
        # An explicit argument wins; otherwise fall back to the
        # "rate_limit" config entry (default: 60 requests/minute).
        configured_limit = config.get_config_value("rate_limit", default=60)
        self.limit = limit_per_minute if limit_per_minute is not None else configured_limit
        self.window_size = 60  # window length in seconds
        self.requests = {}  # {identifier: [timestamp1, timestamp2, ...]}
        self.lock = threading.Lock()

    def is_allowed(self, identifier: str) -> bool:
        """Check whether a request from *identifier* is allowed.

        Args:
            identifier: unique key (token / IP).

        Returns:
            bool: True if the request fits in the window, False otherwise.
        """
        with self.lock:
            # time.monotonic() is immune to wall-clock jumps (NTP, manual
            # adjustment), which would otherwise corrupt the window.
            now = time.monotonic()

            # Drop timestamps that fell out of the sliding window.
            window = [t for t in self.requests.get(identifier, [])
                      if now - t < self.window_size]

            if len(window) >= self.limit:
                # Over the limit: keep the pruned window and reject.
                self.requests[identifier] = window
                return False

            # Record the current request.
            window.append(now)
            self.requests[identifier] = window
            return True

    def prune(self):
        """Drop identifiers that have no requests inside the window.

        Prevents unbounded growth of ``self.requests`` for identifiers
        that stopped sending traffic. Safe to call periodically from a
        maintenance thread.
        """
        with self.lock:
            now = time.monotonic()
            for identifier in list(self.requests):
                live = [t for t in self.requests[identifier]
                        if now - t < self.window_size]
                if live:
                    self.requests[identifier] = live
                else:
                    del self.requests[identifier]
|
| 45 |
+
|
| 46 |
+
def session_cleanup():
    """Remove client sessions that have been idle past the timeout.

    Walks every user's per-account session map under the global session
    lock, deletes sessions older than ``session_timeout_minutes``, and
    removes user entries that are left with no sessions at all.
    """
    # Grab the shared config/state holder.
    config_instance = config.config_instance

    with config_instance.client_sessions_lock:
        current_time = datetime.now()
        # Hoisted out of the loops: the timeout is invariant for the whole
        # sweep, so read the config exactly once instead of per session.
        timeout = timedelta(minutes=config_instance.get('session_timeout_minutes'))
        total_expired = 0

        # Iterate over a snapshot of user ids so entries may be deleted.
        for user_id in list(config_instance.client_sessions.keys()):
            user_sessions = config_instance.client_sessions[user_id]
            expired_accounts = []

            # Collect this user's expired account sessions.
            for account_email, session_data in user_sessions.items():
                last_time = session_data["last_time"]
                if current_time - last_time > timeout:
                    expired_accounts.append(account_email)
                    # Log context/IP of the expired session for debugging.
                    context_info = session_data.get("context", "无上下文")
                    ip_info = session_data.get("ip", "无IP")
                    # Preview only the first 30 chars to keep log lines short.
                    context_preview = context_info[:30] + "..." if len(context_info) > 30 else context_info
                    logger.debug(f"过期会话: 用户={user_id[:8]}..., 账户={account_email}, 上下文={context_preview}, IP={ip_info}")

            # Delete expired sessions outside the iteration above.
            for account_email in expired_accounts:
                del user_sessions[account_email]
                total_expired += 1

            # Drop the user entry entirely once it has no sessions left.
            if not user_sessions:
                del config_instance.client_sessions[user_id]

        if total_expired:
            logger.info(f"已清理 {total_expired} 个过期会话")
|
| 83 |
+
|
| 84 |
+
# Module-level guard making start_cleanup_thread() idempotent: the flag
# records whether the worker thread is already running, and the lock
# protects the check-and-set against concurrent callers.
_cleanup_thread_started = False
_cleanup_thread_lock = threading.Lock()

def start_cleanup_thread():
    """Start the periodic session-cleanup daemon thread (idempotent).

    Safe to call multiple times: only the first call actually spawns
    the worker thread.
    """
    global _cleanup_thread_started
    with _cleanup_thread_lock:
        if _cleanup_thread_started:
            logger.debug("会话清理线程已运行,跳过此次启动。")
            return

        def cleanup_worker():
            # Runs forever: sleep for roughly half the session timeout,
            # then sweep expired sessions. The config value is re-read on
            # every iteration so dynamic config updates take effect.
            while True:
                try:
                    timeout_minutes = config.get_config_value('session_timeout_minutes', default=30)
                    sleep_interval = timeout_minutes * 60 / 2
                    if sleep_interval <= 0:  # guard against a zero/negative config value
                        logger.warning(f"无效会话清理休眠间隔: {sleep_interval}s, 用默认15分钟。")
                        sleep_interval = 15 * 60
                    time.sleep(sleep_interval)
                    session_cleanup()
                except Exception as e:
                    # exc_info=True captures the full stack trace in the log.
                    logger.error(f"会话清理线程异常: {e}", exc_info=True)

        # daemon=True: the worker must not block interpreter shutdown.
        cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True, name="SessionCleanupThread")
        cleanup_thread.start()
        _cleanup_thread_started = True
        logger.info("会话清理线程启动成功。")
|
client.py
ADDED
|
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import json
|
| 3 |
+
import base64
|
| 4 |
+
import threading
|
| 5 |
+
import time
|
| 6 |
+
import uuid
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Dict, Optional, Any
|
| 9 |
+
|
| 10 |
+
from utils import logger, mask_email
|
| 11 |
+
import config
|
| 12 |
+
from retry import with_retry
|
| 13 |
+
|
| 14 |
+
class OnDemandAPIClient:
    """OnDemand API client handling authentication, session management and queries."""

    def __init__(self, email: str, password: str, client_id: str = "default_client"):
        """Initialise the client.

        Args:
            email: OnDemand account email.
            password: OnDemand account password.
            client_id: client identifier used in log lines.
        """
        self.email = email
        self.password = password
        self.client_id = client_id
        # Credentials/ids are filled in by sign_in() / create_session().
        self.token = ""
        self.refresh_token = ""
        self.user_id = ""
        self.company_id = ""
        self.session_id = ""
        self.base_url = "https://gateway.on-demand.io/v1"
        self.chat_base_url = "https://api.on-demand.io/chat/v1/client"  # original chat API path
        self.last_error: Optional[str] = None
        self.last_activity = datetime.now()
        self.lock = threading.RLock()  # re-entrant lock for thread-safe operations

        # Per-request association metadata (set by callers elsewhere).
        self._associated_user_identifier: Optional[str] = None
        self._associated_request_ip: Optional[str] = None
        self._current_request_context_hash: Optional[str] = None  # staging slot for the current request's context hash

        # Log with the email masked; the password is never logged.
        masked_email = mask_email(email)
        logger.info(f"已为 {masked_email} 初始化 OnDemandAPIClient (ID: {client_id})")

    def _log(self, message: str, level: str = "INFO"):
        """Internal logging helper using the structured logger.

        Args:
            message: log message.
            level: log level name (falls back to INFO if unknown).
        """
        masked_email = mask_email(self.email)
        log_method = getattr(logger, level.lower(), logger.info)
        log_method(f"[{self.client_id} / {masked_email}] {message}")
        self.last_activity = datetime.now()  # touch the last-activity timestamp

    def get_authorization(self) -> str:
        """Build the Basic Authorization header value used for sign-in."""
        text = f"{self.email}:{self.password}"
        encoded = base64.b64encode(text.encode("utf-8")).decode("utf-8")
        return encoded

    def _do_request(self, method: str, url: str, headers: Dict[str, str],
                    data: Optional[Dict] = None, stream: bool = False,
                    timeout: int = None) -> requests.Response:
        """Perform the actual HTTP request, with no retry logic of its own.

        Args:
            method: HTTP method (GET, POST, ...).
            url: request URL.
            headers: HTTP headers.
            data: request payload (serialised to JSON for POST).
            stream: whether to stream the response.
            timeout: request timeout in seconds.

        Returns:
            The requests.Response object.

        Raises:
            requests.exceptions.RequestException: if the request fails.
        """
        if method.upper() == 'GET':
            response = requests.get(url, headers=headers, stream=stream, timeout=timeout)
        elif method.upper() == 'POST':
            json_data = json.dumps(data) if data else None
            response = requests.post(url, data=json_data, headers=headers, stream=stream, timeout=timeout)
        else:
            raise ValueError(f"不支持的HTTP方法: {method}")

        response.raise_for_status()
        return response

    @with_retry()
    def sign_in(self, context: Optional[str] = None) -> bool:
        """Sign in to obtain token, refreshToken, userId and companyId."""
        with self.lock:  # thread safety
            self.last_error = None
            url = f"{self.base_url}/auth/user/signin"
            payload = {"accountType": "default"}
            headers = {
                'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0",
                'Accept': "application/json, text/plain, */*",
                'Content-Type': "application/json",
                'Authorization': f"Basic {self.get_authorization()}",  # sign-in uses Basic auth
                'Referer': "https://app.on-demand.io/"
            }
            if context:
                self._current_request_context_hash = context

            try:
                masked_email = mask_email(self.email)
                self._log(f"尝试登录 {masked_email}...")

                # Retry-free _do_request: retries are handled by the decorator.
                response = self._do_request('POST', url, headers, payload, timeout=config.get_config_value('request_timeout'))
                data = response.json()

                if config.get_config_value('debug_mode'):
                    # In debug mode log the response with secrets redacted.
                    debug_data = data.copy()
                    if 'data' in debug_data and 'tokenData' in debug_data['data']:
                        debug_data['data']['tokenData']['token'] = '***REDACTED***'
                        debug_data['data']['tokenData']['refreshToken'] = '***REDACTED***'
                    self._log(f"登录原始响应: {json.dumps(debug_data, indent=2, ensure_ascii=False)}", "DEBUG")

                self.token = data.get('data', {}).get('tokenData', {}).get('token', '')
                self.refresh_token = data.get('data', {}).get('tokenData', {}).get('refreshToken', '')
                self.user_id = data.get('data', {}).get('user', {}).get('userId', '')
                self.company_id = data.get('data', {}).get('user', {}).get('default_company_id', '')

                if self.token and self.user_id and self.company_id:
                    self._log(f"登录成功。已获取必要的凭证。")
                    return True
                else:
                    self.last_error = "登录成功,但未能从响应中提取必要的字段。"
                    self._log(f"登录失败: {self.last_error}", level="ERROR")
                    return False

            except requests.exceptions.RequestException as e:
                self.last_error = f"登录请求失败: {e}"
                self._log(f"登录失败: {e}", level="ERROR")
                raise  # re-raise so the retry decorator can handle it

            except json.JSONDecodeError as e:
                self.last_error = f"登录 JSON 解码失败: {e}. 响应文本: {response.text if 'response' in locals() else 'N/A'}"
                self._log(self.last_error, level="ERROR")
                return False

            except Exception as e:
                self.last_error = f"登录过程中发生意外错误: {e}"
                self._log(self.last_error, level="ERROR")
                return False

    @with_retry()
    def refresh_token_if_needed(self) -> bool:
        """Refresh the access token when it is expired or invalid.

        Returns:
            bool: True on a successful refresh, False otherwise.
        """
        with self.lock:  # thread safety
            self.last_error = None
            if not self.refresh_token:
                self.last_error = "没有可用的 refresh token 来刷新令牌。"
                self._log(self.last_error, level="WARNING")
                return False

            url = f"{self.base_url}/auth/user/refresh_token"
            payload = {"data": {"token": self.token, "refreshToken": self.refresh_token}}
            headers = {'Content-Type': "application/json"}

            try:
                self._log("尝试刷新令牌...")

                # Retry-free _do_request: retries are handled by the decorator.
                response = self._do_request('POST', url, headers, payload, timeout=config.get_config_value('request_timeout'))
                data = response.json()

                if config.get_config_value('debug_mode'):
                    # In debug mode log the response with secrets redacted.
                    debug_data = data.copy()
                    if 'data' in debug_data:
                        if 'token' in debug_data['data']:
                            debug_data['data']['token'] = '***REDACTED***'
                        if 'refreshToken' in debug_data['data']:
                            debug_data['data']['refreshToken'] = '***REDACTED***'
                    self._log(f"刷新令牌原始响应: {json.dumps(debug_data, indent=2, ensure_ascii=False)}", "DEBUG")

                new_token = data.get('data', {}).get('token', '')
                new_refresh_token = data.get('data', {}).get('refreshToken', '')  # OnDemand may not always return a new refresh token

                if new_token:
                    self.token = new_token
                    if new_refresh_token:  # only update when a new refresh token was returned
                        self.refresh_token = new_refresh_token
                    self._log("令牌刷新成功。")
                    return True
                else:
                    self.last_error = "令牌刷新成功,但响应中没有新的 token。"
                    self._log(f"令牌刷新失败: {self.last_error}", level="ERROR")
                    return False

            except requests.exceptions.RequestException as e:
                self.last_error = f"令牌刷新请求失败: {e}"
                self._log(f"令牌刷新失败: {e}", level="ERROR")

                # On an auth error a full re-login may be required.
                if hasattr(e, 'response') and e.response is not None and e.response.status_code == 401:
                    self._log("令牌刷新返回401错误,可能需要完全重新登录", level="WARNING")

                raise  # re-raise so the retry decorator can handle it

            except json.JSONDecodeError as e:
                self.last_error = f"令牌刷新 JSON 解码失败: {e}. 响应文本: {response.text if 'response' in locals() else 'N/A'}"
                self._log(self.last_error, level="ERROR")
                return False

            except Exception as e:
                self.last_error = f"令牌刷新过程中发生意外错误: {e}"
                self._log(self.last_error, level="ERROR")
                return False

    @with_retry()
    def create_session(self, external_user_id: str = "openai-adapter-user", external_context: Optional[str] = None) -> bool:
        """Create a new chat session.

        Args:
            external_user_id: external user id prefix; a UUID is appended for uniqueness.
            external_context: external context hash (optional).

        Returns:
            bool: True on success, False otherwise.
        """
        with self.lock:  # thread safety
            self.last_error = None
            if external_context:
                self._current_request_context_hash = external_context
            if not self.token or not self.user_id or not self.company_id:
                self.last_error = "创建会话缺少 token, user_id, 或 company_id。正在尝试登录。"
                self._log(self.last_error, level="WARNING")
                if not self.sign_in():  # attempt sign-in when not authenticated
                    self.last_error = f"无法创建会话:登录失败。最近的客户端错误: {self.last_error}"
                    return False  # cannot continue without a successful sign-in

            url = f"{self.chat_base_url}/sessions"
            # Make externalUserId unique per session to avoid collisions.
            unique_id = f"{external_user_id}-{uuid.uuid4().hex}"
            payload = {"externalUserId": unique_id, "pluginIds": []}
            headers = {
                'Content-Type': "application/json",
                'Authorization': f"Bearer {self.token}",  # original bearer-auth scheme
                'x-company-id': self.company_id,
                'x-user-id': self.user_id
            }

            self._log(f"尝试创建会话,company_id: {self.company_id}, user_id: {self.user_id}, external_id: {unique_id}")

            try:
                try:
                    # First attempt: retry-free _do_request (decorator handles retries).
                    response = self._do_request('POST', url, headers, payload, timeout=config.get_config_value('request_timeout'))
                except requests.exceptions.HTTPError as e:
                    # On a 401, try a token refresh before giving up.
                    if e.response.status_code == 401:
                        self._log("创建会话时令牌过期,尝试刷新...", level="INFO")
                        if self.refresh_token_if_needed():
                            headers['Authorization'] = f"Bearer {self.token}"  # update header with new token
                            response = self._do_request('POST', url, headers, payload, timeout=config.get_config_value('request_timeout'))
                        else:  # refresh failed — fall back to a full re-login
                            self._log("令牌刷新失败。尝试完全重新登录以创建会话。", level="WARNING")
                            if self.sign_in():
                                headers['Authorization'] = f"Bearer {self.token}"
                                response = self._do_request('POST', url, headers, payload, timeout=config.get_config_value('request_timeout'))
                            else:
                                self.last_error = f"会话创建失败:令牌刷新和重新登录均失败。最近的客户端错误: {self.last_error}"
                                self._log(self.last_error, level="ERROR")
                                return False
                    else:
                        # Any other HTTP error propagates unchanged.
                        raise

                data = response.json()

                if config.get_config_value('debug_mode'):
                    self._log(f"创建会话原始响应: {json.dumps(data, indent=2, ensure_ascii=False)}", "DEBUG")

                session_id_val = data.get('data', {}).get('id', '')
                if session_id_val:
                    self.session_id = session_id_val
                    self._log(f"会话创建成功。会话 ID: {self.session_id}")
                    return True
                else:
                    self.last_error = f"会话创建成功,但响应中没有会话 ID。"
                    self._log(f"会话创建失败: {self.last_error}", level="ERROR")
                    return False

            except requests.exceptions.RequestException as e:
                self.last_error = f"会话创建请求失败: {e}"
                self._log(f"会话创建失败: {e}", level="ERROR")
                raise  # re-raise so the retry decorator can handle it

            except json.JSONDecodeError as e:
                self.last_error = f"会话创建 JSON 解码失败: {e}. 响应文本: {response.text if 'response' in locals() else 'N/A'}"
                self._log(self.last_error, level="ERROR")
                return False

            except Exception as e:
                self.last_error = f"会话创建过程中发生意外错误: {e}"
                self._log(self.last_error, level="ERROR")
                return False

    @with_retry()
    def send_query(self, query: str, endpoint_id: str = "predefined-claude-3.7-sonnet",
                   stream: bool = False, model_configs_input: Optional[Dict] = None,
                   full_query_override: Optional[str] = None) -> Dict:
        """Send a query to the chat session, handling streaming and non-streaming responses.

        Args:
            query: query text (ignored when full_query_override is given).
            endpoint_id: OnDemand endpoint id.
            stream: whether to request a streaming response.
            model_configs_input: model configuration such as temperature / maxTokens.
            full_query_override: complete replacement for the query text, if provided.

        Returns:
            Dict: contains either the response content or the stream object.
        """
        with self.lock:  # thread safety
            self.last_error = None

            # Session check / lazy creation.
            if not self.session_id:
                self.last_error = "没有可用的会话 ID。正在尝试创建新会话。"
                self._log(self.last_error, level="WARNING")
                if not self.create_session():
                    self.last_error = f"查询失败:会话创建失败。最近的客户端错误: {self.last_error}"
                    self._log(self.last_error, level="ERROR")
                    return {"error": self.last_error}

            if not self.token:
                self.last_error = "发送查询没有可用的 token。"
                self._log(self.last_error, level="ERROR")
                return {"error": self.last_error}

            url = f"{self.chat_base_url}/sessions/{self.session_id}/query"

            # Normalise the query input.
            current_query = ""
            if query is None:
                self._log("警告:查询内容为None,已替换为空字符串", level="WARNING")
            elif not isinstance(query, str):
                current_query = str(query)
                self._log(f"警告:查询内容不是字符串类型,已转换为字符串: {type(query)} -> {type(current_query)}", level="WARNING")
            else:
                current_query = query

            # full_query_override takes precedence over the raw query.
            query_to_send = full_query_override if full_query_override is not None else current_query
            if full_query_override is not None:
                self._log(f"使用 full_query_override (长度: {len(full_query_override)}) 代替原始 query。", "DEBUG")

            payload = {
                "endpointId": endpoint_id,
                "query": query_to_send,  # the normalised query or the override
                "pluginIds": [],
                "responseMode": "stream" if stream else "sync",
                "debugMode": "on" if config.get_config_value('debug_mode') else "off",
                "fulfillmentOnly": False
            }

            # Handle model_configs_input.
            if model_configs_input:
                # Forward only the non-None entries of model_configs_input.
                # The API is expected to tolerate/ignore unexpected keys; if
                # it turns out to require specific fields, filter more
                # strictly here.
                processed_model_configs = {k: v for k, v in model_configs_input.items() if v is not None}
                if processed_model_configs:  # only attach modelConfigs when non-empty
                    payload["modelConfigs"] = processed_model_configs

            self._log(f"最终的payload: {json.dumps(payload, ensure_ascii=False)}", level="DEBUG")

            headers = {
                'Content-Type': "application/json",
                'Authorization': f"Bearer {self.token}",
                'x-company-id': self.company_id
            }

            truncated_query_log = current_query[:100] + "..." if len(current_query) > 100 else current_query
            self._log(f"向端点 {endpoint_id} 发送查询 (stream={stream})。查询内容: {truncated_query_log}")

            try:
                response = self._do_request('POST', url, headers, payload, stream=True, timeout=config.get_config_value('stream_timeout'))

                if stream:
                    self._log("返回流式响应对象供外部处理")
                    return {"stream": True, "response_obj": response}
                else:  # stream (method argument) is False
                    full_answer = ""
                    try:
                        # _do_request was called with stream=True regardless,
                        # so the stream still has to be consumed here. With
                        # responseMode="sync" the OnDemand API should return
                        # the complete content directly.

                        response_body = response.text  # read the whole body
                        response.close()  # make sure the connection is closed

                        self._log(f"非流式响应原始文本 (前500字符): {response_body[:500]}", "DEBUG")

                        try:
                            # First try parsing the whole body as one JSON object.
                            data = json.loads(response_body)
                            if isinstance(data, dict):
                                if "answer" in data and isinstance(data["answer"], str):
                                    full_answer = data["answer"]
                                elif "content" in data and isinstance(data["content"], str):  # fallback field
                                    full_answer = data["content"]
                                elif data.get("eventType") == "fulfillment" and "answer" in data:
                                    full_answer = data.get("answer", "")
                                else:
                                    if not full_answer:  # don't clobber an answer already found
                                        self._log(f"非流式响应解析为JSON后,未在顶层或常见字段找到答案: {response_body[:200]}", "WARNING")
                            else:
                                self._log(f"非流式响应解析为JSON后,不是字典类型: {type(data)}", "WARNING")

                        except json.JSONDecodeError:
                            # Plain JSON parsing failed — fall back to SSE line parsing.
                            self._log(f"非流式响应直接解析JSON失败,尝试按SSE行解析: {response_body[:200]}", "WARNING")
                            for line in response_body.splitlines():
                                if line:
                                    decoded_line = line  # already a str
                                    if decoded_line.startswith("data:"):
                                        json_str = decoded_line[len("data:"):].strip()
                                        if json_str == "[DONE]":
                                            break
                                        try:
                                            event_data = json.loads(json_str)
                                            if event_data.get("eventType", "") == "fulfillment":
                                                full_answer += event_data.get("answer", "")
                                        except json.JSONDecodeError:
                                            self._log(f"非流式后备SSE解析时 JSONDecodeError: {json_str}", level="WARNING")
                                            continue

                        self._log(f"非流式响应接收完毕。聚合内容长度: {len(full_answer)}")
                        return {"stream": False, "content": full_answer}

                    except requests.exceptions.RequestException as e:  # normally caught and retried inside _do_request
                        self.last_error = f"非流式请求时发生错误: {e}"
                        self._log(self.last_error, level="ERROR")
                        # Reaching here means the retries have already failed.
                        # raise e  # alternatively propagate to the caller
                        return {"error": self.last_error, "stream": False, "content": ""}
                    except Exception as e:
                        self.last_error = f"非流式处理中发生意外错误: {e}"
                        self._log(self.last_error, level="ERROR")
                        return {"error": self.last_error, "stream": False, "content": ""}

            except requests.exceptions.RequestException as e:
                self.last_error = f"请求失败: {e}"
                self._log(f"查询失败: {e}", level="ERROR")
                raise

            except Exception as e:
                error_message = f"send_query 过程中发生意外错误: {e}"
                error_type = type(e).__name__
                self.last_error = error_message
                self._log(f"{error_message} (错误类型: {error_type})", level="CRITICAL")
                return {"error": str(e)}
|
config.py
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import time
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
import threading
|
| 6 |
+
from typing import Dict, List, Any, Optional, Union, get_type_hints
|
| 7 |
+
from datetime import datetime, timedelta
|
| 8 |
+
from utils import logger, load_config
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Config:
    """Central configuration manager.

    Holds runtime settings (defaults overridable from a config file or
    environment variables), the OnDemand account pool with its round-robin
    and cooldown state, the per-user client session cache, and usage
    statistics that are periodically persisted to disk.
    """

    # Default configuration values (overridable via config file / env vars).
    _defaults = {
        "ondemand_session_timeout_minutes": 30,  # idle timeout for an OnDemand session (minutes)
        "session_timeout_minutes": 3600,  # client session idle timeout (minutes) - large to avoid re-creating sessions
        "max_retries": 5,  # default retry count - raised to ride out more transient errors
        "retry_delay": 3,  # default delay between retries (seconds)
        "request_timeout": 45,  # default request timeout (seconds)
        "stream_timeout": 180,  # default timeout for streaming requests (seconds)
        "rate_limit": 30,  # default rate limit (requests per minute) - kept low to avoid upstream 429s
        "account_cooldown_seconds": 300,  # per-account cooldown after a 429 error (seconds)
        "debug_mode": False,  # debug mode flag
        "api_access_token": "sk-2api-ondemand-access-token-2025",  # API access token
        "stats_file_path": "stats_data.json",  # usage statistics file path
        "stats_backup_path": "stats_data_backup.json",  # temp file used for atomic stats saves
        "stats_save_interval": 300,  # save statistics every 5 minutes
        "max_history_items": 1000,  # cap on stored request-history entries
        "default_endpoint_id": "predefined-claude-3.7-sonnet"  # fallback/default endpoint ID
    }

    # Model name mapping: OpenAI-style model name -> on-demand.io endpointId.
    _model_mapping = {
        "gpt-3.5-turbo": "predefined-openai-gpto3-mini",
        "gpto3-mini": "predefined-openai-gpto3-mini",
        "gpt-4o": "predefined-openai-gpt4o",
        "gpt-4o-mini": "predefined-openai-gpt4o-mini",
        "gpt-4-turbo": "predefined-openai-gpt4.1",  # alias of gpt-4.1
        "gpt-4.1": "predefined-openai-gpt4.1",
        "gpt-4.1-mini": "predefined-openai-gpt4.1-mini",
        "gpt-4.1-nano": "predefined-openai-gpt4.1-nano",
        "deepseek-v3": "predefined-deepseek-v3",
        "deepseek-r1": "predefined-deepseek-r1",
        "claude-3.5-sonnet": "predefined-claude-3.5-sonnet",
        "claude-3.7-sonnet": "predefined-claude-3.7-sonnet",
        "claude-3-opus": "predefined-claude-3-opus",
        "claude-3-haiku": "predefined-claude-3-haiku",
        "gemini-1.5-pro": "predefined-gemini-2.0-flash",
        "gemini-2.0-flash": "predefined-gemini-2.0-flash",
        # Add more mappings as needed.
    }

    def __init__(self):
        """Initialize configuration state from the built-in defaults."""
        # Start from a copy so class-level defaults are never mutated.
        self._config = self._defaults.copy()

        # Usage statistics accumulators.
        self.usage_stats = {
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "model_usage": defaultdict(int),      # request count per model
            "account_usage": defaultdict(int),    # request count per account
            "daily_usage": defaultdict(int),      # request count per day
            "hourly_usage": defaultdict(int),     # request count per hour
            "request_history": [],                # bounded request history
            "total_prompt_tokens": 0,
            "total_completion_tokens": 0,
            "total_tokens": 0,
            "model_tokens": defaultdict(int),     # token usage per model
            "daily_tokens": defaultdict(int),     # token usage per day
            "hourly_tokens": defaultdict(int),    # token usage per hour
            "last_saved": datetime.now().isoformat()  # last persistence timestamp
        }

        # Locks guarding shared mutable state.
        self.usage_stats_lock = threading.Lock()      # thread-safe statistics access
        self.account_index_lock = threading.Lock()    # thread-safe account selection (also guards cooldowns)
        self.client_sessions_lock = threading.Lock()  # thread-safe session management

        # Round-robin index used when picking an account for a new client session.
        self.current_account_index = 0

        # Per-user client sessions kept in memory:
        # {user identifier: {account email: {"client": OnDemandAPIClient, "last_time": datetime}}}
        # Sessions are keyed by user so different users never share a session.
        self.client_sessions = {}

        # Account pool.
        self.accounts = []

        # Accounts temporarily benched after rate limiting:
        # {account email: cooldown end time (datetime)}
        self.account_cooldowns = {}

    def get(self, key: str, default: Any = None) -> Any:
        """Return the configuration value for *key*, or *default* if unset."""
        return self._config.get(key, default)

    def set(self, key: str, value: Any) -> None:
        """Set a single configuration value."""
        self._config[key] = value

    def update(self, config_dict: Dict[str, Any]) -> None:
        """Bulk-update configuration values from a dict."""
        self._config.update(config_dict)

    def get_model_endpoint(self, model_name: str) -> str:
        """Return the endpoint ID for *model_name*, falling back to the default endpoint."""
        return self._model_mapping.get(model_name, self.get("default_endpoint_id"))

    def load_from_file(self) -> bool:
        """Load configuration from the config file; returns True on success."""
        try:
            # utils.load_config() currently takes no file_path argument.
            config_data = load_config()
            if config_data:
                # Accounts are handled separately below; everything else is a plain setting.
                for key, value in config_data.items():
                    if key != "accounts":
                        self.set(key, value)

                if "accounts" in config_data:
                    self.accounts = config_data["accounts"]

                logger.info("已从配置文件加载配置")
                return True
            return False
        except Exception as e:
            logger.error(f"加载配置文件时出错: {e}")
            return False

    def load_from_env(self) -> None:
        """Load configuration overrides from environment variables.

        Account info comes from ONDEMAND_ACCOUNTS (JSON) only if no accounts
        were loaded from the config file. Scalar settings are converted to the
        type of their default value; malformed numeric values are logged and
        skipped instead of crashing startup.
        """
        if not self.accounts:
            accounts_env = os.getenv("ONDEMAND_ACCOUNTS", "")
            if accounts_env:
                try:
                    self.accounts = json.loads(accounts_env).get('accounts', [])
                    logger.info("已从环境变量加载账户信息")
                except json.JSONDecodeError:
                    logger.error("解码 ONDEMAND_ACCOUNTS 环境变量失败")

        # config key -> environment variable name
        env_mappings = {
            "ondemand_session_timeout_minutes": "ONDEMAND_SESSION_TIMEOUT_MINUTES",
            "session_timeout_minutes": "SESSION_TIMEOUT_MINUTES",
            "max_retries": "MAX_RETRIES",
            "retry_delay": "RETRY_DELAY",
            "request_timeout": "REQUEST_TIMEOUT",
            "stream_timeout": "STREAM_TIMEOUT",
            "rate_limit": "RATE_LIMIT",
            "debug_mode": "DEBUG_MODE",
            "api_access_token": "API_ACCESS_TOKEN"
        }

        for config_key, env_key in env_mappings.items():
            env_value = os.getenv(env_key)
            if env_value is None:
                continue
            # Convert based on the type of the default value.
            default_value = self.get(config_key)
            if isinstance(default_value, bool):
                self.set(config_key, env_value.lower() == 'true')
            elif isinstance(default_value, int):
                try:
                    self.set(config_key, int(env_value))
                except ValueError:
                    # A bad env var should not kill startup; keep the default.
                    logger.error(f"环境变量 {env_key} 的值无效: {env_value}")
            elif isinstance(default_value, float):
                try:
                    self.set(config_key, float(env_value))
                except ValueError:
                    logger.error(f"环境变量 {env_key} 的值无效: {env_value}")
            else:
                self.set(config_key, env_value)

    def save_stats_to_file(self):
        """Persist usage statistics to disk atomically.

        A snapshot is taken under the lock, then written to the backup path
        and moved over the main file with os.replace (atomic on POSIX and
        Windows). This fixes the previous remove+rename sequence, which left
        a window with no stats file and was not atomic.
        """
        try:
            with self.usage_stats_lock:
                # Snapshot under the lock; do the slow file I/O outside it.
                stats_copy = {
                    "total_requests": self.usage_stats["total_requests"],
                    "successful_requests": self.usage_stats["successful_requests"],
                    "failed_requests": self.usage_stats["failed_requests"],
                    "model_usage": dict(self.usage_stats["model_usage"]),
                    "account_usage": dict(self.usage_stats["account_usage"]),
                    "daily_usage": dict(self.usage_stats["daily_usage"]),
                    "hourly_usage": dict(self.usage_stats["hourly_usage"]),
                    "request_history": list(self.usage_stats["request_history"]),
                    "total_prompt_tokens": self.usage_stats["total_prompt_tokens"],
                    "total_completion_tokens": self.usage_stats["total_completion_tokens"],
                    "total_tokens": self.usage_stats["total_tokens"],
                    "model_tokens": dict(self.usage_stats["model_tokens"]),
                    "daily_tokens": dict(self.usage_stats["daily_tokens"]),
                    "hourly_tokens": dict(self.usage_stats["hourly_tokens"]),
                    "last_saved": datetime.now().isoformat()
                }

            stats_file_path = self.get("stats_file_path")
            stats_backup_path = self.get("stats_backup_path")

            # Write to the temp file first so a crash mid-write cannot corrupt
            # the main stats file.
            with open(stats_backup_path, 'w', encoding='utf-8') as f:
                json.dump(stats_copy, f, ensure_ascii=False, indent=2)

            # Atomic swap; overwrites the destination if it exists.
            os.replace(stats_backup_path, stats_file_path)

            logger.info(f"统计数据已保存到 {stats_file_path}")
            with self.usage_stats_lock:
                self.usage_stats["last_saved"] = datetime.now().isoformat()
        except Exception as e:
            logger.error(f"保存统计数据时出错: {e}")

    def load_stats_from_file(self):
        """Load previously saved statistics from disk; returns True on success."""
        try:
            stats_file_path = self.get("stats_file_path")
            if os.path.exists(stats_file_path):
                with open(stats_file_path, 'r', encoding='utf-8') as f:
                    saved_stats = json.load(f)

                with self.usage_stats_lock:
                    # Scalar counters.
                    self.usage_stats["total_requests"] = saved_stats.get("total_requests", 0)
                    self.usage_stats["successful_requests"] = saved_stats.get("successful_requests", 0)
                    self.usage_stats["failed_requests"] = saved_stats.get("failed_requests", 0)
                    self.usage_stats["total_prompt_tokens"] = saved_stats.get("total_prompt_tokens", 0)
                    self.usage_stats["total_completion_tokens"] = saved_stats.get("total_completion_tokens", 0)
                    self.usage_stats["total_tokens"] = saved_stats.get("total_tokens", 0)

                    # Dict-shaped counters: merge into the existing defaultdicts.
                    for model, count in saved_stats.get("model_usage", {}).items():
                        self.usage_stats["model_usage"][model] = count

                    for account, count in saved_stats.get("account_usage", {}).items():
                        self.usage_stats["account_usage"][account] = count

                    for day, count in saved_stats.get("daily_usage", {}).items():
                        self.usage_stats["daily_usage"][day] = count

                    for hour, count in saved_stats.get("hourly_usage", {}).items():
                        self.usage_stats["hourly_usage"][hour] = count

                    for model, tokens in saved_stats.get("model_tokens", {}).items():
                        self.usage_stats["model_tokens"][model] = tokens

                    for day, tokens in saved_stats.get("daily_tokens", {}).items():
                        self.usage_stats["daily_tokens"][day] = tokens

                    for hour, tokens in saved_stats.get("hourly_tokens", {}).items():
                        self.usage_stats["hourly_tokens"][hour] = tokens

                    # Request history, trimmed to the configured bound.
                    self.usage_stats["request_history"] = saved_stats.get("request_history", [])
                    max_history_items = self.get("max_history_items")
                    if len(self.usage_stats["request_history"]) > max_history_items:
                        self.usage_stats["request_history"] = self.usage_stats["request_history"][-max_history_items:]

                logger.info(f"已从 {stats_file_path} 加载统计数据")
                return True
            else:
                logger.info(f"未找到统计数据文件 {stats_file_path},将使用默认值")
                return False
        except Exception as e:
            logger.error(f"加载统计数据时出错: {e}")
            return False

    def start_stats_save_thread(self):
        """Start a daemon thread that saves statistics every stats_save_interval seconds."""
        def save_stats_periodically():
            while True:
                time.sleep(self.get("stats_save_interval"))
                self.save_stats_to_file()

        save_thread = threading.Thread(target=save_stats_periodically, daemon=True)
        save_thread.start()
        logger.info(f"统计数据保存线程已启动,每 {self.get('stats_save_interval')} 秒保存一次")

    def init(self):
        """Initialize configuration: load file and env settings, restore stats, start the saver thread."""
        self.load_from_file()
        self.load_from_env()

        # Validate account info; run degraded rather than crash when absent.
        if not self.accounts:
            error_msg = "在 config.json 或环境变量 ONDEMAND_ACCOUNTS 中未找到账户信息"
            logger.critical(error_msg)
            logger.warning("将继续运行,但没有账户信息,可能会导致功能受限")

        logger.info("已加载API访问Token")

        # Restore previously saved statistics, then start periodic persistence.
        self.load_stats_from_file()
        self.start_stats_save_thread()

    def get_next_ondemand_account_details(self):
        """Return (email, password) of the next OnDemand account, round-robin.

        Accounts in their cooldown period are skipped. If every account is
        cooling down, the first account is returned anyway. Returns
        (None, None) when no accounts are configured at all (init allows
        running without accounts, so this path must not raise IndexError).
        """
        with self.account_index_lock:
            # Guard: no accounts configured at all.
            if not self.accounts:
                logger.error("没有可用的账户信息")
                return None, None

            current_time = datetime.now()

            # Drop cooldown records that have expired.
            expired_cooldowns = [email for email, end_time in self.account_cooldowns.items()
                                 if end_time < current_time]
            for email in expired_cooldowns:
                del self.account_cooldowns[email]
                logger.info(f"账户 {email} 的冷却期已结束,现在可用")

            # Try at most len(self.accounts) candidates to find one not cooling down.
            for _ in range(len(self.accounts)):
                account_details = self.accounts[self.current_account_index]
                email = account_details.get('email')

                # Advance the round-robin index for the next call.
                self.current_account_index = (self.current_account_index + 1) % len(self.accounts)

                if email in self.account_cooldowns:
                    cooldown_end = self.account_cooldowns[email]
                    remaining_seconds = (cooldown_end - current_time).total_seconds()
                    logger.warning(f"账户 {email} 仍在冷却期中,还剩 {remaining_seconds:.1f} 秒")
                    continue  # try the next account

                logger.info(f"[系统] 新会话将使用账户: {email}")
                return email, account_details.get('password')

            # Every account is cooling down: fall back to the first one.
            logger.warning("所有账户都在冷却期!使用第一个账户,尽管它可能会触发速率限制")
            account_details = self.accounts[0]
            return account_details.get('email'), account_details.get('password')
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
# Global singleton configuration instance shared by the whole application.
config_instance = Config()
|
| 347 |
+
|
| 348 |
+
def init_config():
    """Backward-compatible module-level entry point that initializes the global config instance."""
    config_instance.init()
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def get_config_value(name: str, default: Any = None) -> Any:
    """Return the latest value of a configuration variable.

    Callers should use config.get_config_value('name') rather than importing
    values directly so lookups always observe the current state. For accounts,
    model_mapping, usage_stats and client_sessions, use the dedicated getter
    functions below.
    """
    return config_instance.get(name, default)
|
| 360 |
+
|
| 361 |
+
# 新增的类型安全的getter函数
|
| 362 |
+
def get_accounts() -> List[Dict[str, str]]:
    """Return the list of configured OnDemand account records."""
    return config_instance.accounts
|
| 365 |
+
|
| 366 |
+
def get_model_mapping() -> Dict[str, str]:
    """Return the mapping from model names to endpoint IDs."""
    return config_instance._model_mapping
|
| 369 |
+
|
| 370 |
+
def get_usage_stats() -> Dict[str, Any]:
    """Return the shared usage-statistics dictionary."""
    return config_instance.usage_stats
|
| 373 |
+
|
| 374 |
+
def get_client_sessions() -> Dict[str, Any]:
    """Return the in-memory per-user client session cache."""
    return config_instance.client_sessions
|
| 377 |
+
|
| 378 |
+
def get_next_ondemand_account_details():
    """Backward-compatible wrapper: pick the next account via round-robin on the global instance."""
    return config_instance.get_next_ondemand_account_details()
|
| 381 |
+
|
| 382 |
+
def set_account_cooldown(email, cooldown_seconds=None):
    """Bench an account for a cooldown period.

    Args:
        email: account e-mail address.
        cooldown_seconds: cooldown duration in seconds; when None, the
            configured 'account_cooldown_seconds' value is used.
    """
    if cooldown_seconds is None:
        cooldown_seconds = config_instance.get('account_cooldown_seconds')

    cooldown_end = datetime.now() + timedelta(seconds=cooldown_seconds)
    # The account-index lock also guards the cooldown dictionary.
    with config_instance.account_index_lock:
        config_instance.account_cooldowns[email] = cooldown_end
        logger.warning(f"账户 {email} 已设置冷却期 {cooldown_seconds} 秒,将于 {cooldown_end.strftime('%Y-%m-%d %H:%M:%S')} 结束")
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
# ⚠️ 警告:为保证配置动态更新,请勿使用 from config import XXX,只使用 import config 并通过 config.get_config_value('变量名') 获取配置。
|
| 399 |
+
# 这样可确保配置值始终是最新的。
|
| 400 |
+
# (。•ᴗ-)ノ゙ 你的聪明小助手温馨提示~
|
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
|
| 2 |
+
requests
|
| 3 |
+
tiktoken
|
| 4 |
+
regex
|
routes.py
ADDED
|
@@ -0,0 +1,1043 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import time
|
| 3 |
+
import uuid
|
| 4 |
+
import html
|
| 5 |
+
import hashlib # Added import
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Dict, List, Any, Optional
|
| 8 |
+
from flask import request, Response, stream_with_context, jsonify, render_template, redirect, url_for, flash
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
|
| 11 |
+
from utils import logger, generate_request_id, count_tokens, count_message_tokens
|
| 12 |
+
import config
|
| 13 |
+
from auth import RateLimiter
|
| 14 |
+
from client import OnDemandAPIClient
|
| 15 |
+
from datetime import timedelta
|
| 16 |
+
|
| 17 |
+
# 初始化速率限制器
|
| 18 |
+
# rate_limiter 将在 config_instance 定义后初始化
|
| 19 |
+
|
| 20 |
+
# Obtain the shared configuration instance.
config_instance = config.config_instance
# NOTE(fix): Config._defaults defines the key 'rate_limit' (requests per
# minute), not 'rate_limit_per_minute', so the old lookup always fell back to
# the hard-coded 60 and ignored the configured limit. Prefer the legacy key
# if a config file happens to set it, then the documented 'rate_limit' key,
# then 60 as the last-resort default.
rate_limiter = RateLimiter(config_instance.get('rate_limit_per_minute',
                                               config_instance.get('rate_limit', 60)))
|
| 23 |
+
|
| 24 |
+
# 模型价格配置将从 config_instance 获取
|
| 25 |
+
# 默认价格也将从 config_instance 获取
|
| 26 |
+
|
| 27 |
+
def format_datetime(timestamp):
    """Render an ISO-8601 timestamp as 'YYYY-MM-DD HH:MM:SS'.

    Falsy values and the sentinel "从未保存" are passed through unchanged, as
    is any string that is not an ISO timestamp or fails to parse.
    """
    if not timestamp or timestamp == "从未保存":
        return timestamp

    try:
        if 'T' not in timestamp:
            # Already a plain formatted string - leave it alone.
            return timestamp
        parsed = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
        return parsed.strftime('%Y-%m-%d %H:%M:%S')
    except Exception:
        return timestamp
|
| 41 |
+
|
| 42 |
+
def format_number(value):
    """Render a number with an automatically chosen magnitude suffix (K/M/G/T).

    None and '-' yield '-'; unparseable values are returned via str(). Small
    magnitudes (< 0.01) use scientific notation; integral values drop the
    decimals.
    """
    if value is None or value == '-':
        return '-'

    try:
        num = float(value)
        # (threshold, suffix), largest first.
        for threshold, suffix in ((1e12, 'T'), (1e9, 'G'), (1e6, 'M'), (1e3, 'K')):
            if num >= threshold:
                return f"{num / threshold:.2f}{suffix}"
        if num == 0:
            return "0"
        if abs(num) < 0.01:
            # Tiny magnitudes: scientific notation.
            return f"{num:.2e}"
        return f"{num:.0f}" if num == int(num) else f"{num:.2f}"
    except (ValueError, TypeError):
        return str(value)
|
| 65 |
+
|
| 66 |
+
def format_duration(ms):
    """Format a millisecond duration using the largest sensible unit.

    None and '-' yield '-'; unparseable values are returned via str().
    Units used: 天 (days), 小时 (hours), 分钟 (minutes), 秒 (seconds),
    毫秒 (milliseconds).
    """
    if ms is None or ms == '-':
        return '-'

    try:
        ms = float(ms)  # float, not int, so fractional milliseconds are accepted
        if ms >= 86400000:  # >= 1 day (24*60*60*1000)
            return f"{ms/86400000:.2f}天"
        elif ms >= 3600000:  # >= 1 hour (60*60*1000)
            return f"{ms/3600000:.2f}小时"
        elif ms >= 60000:  # >= 1 minute (60*1000)
            return f"{ms/60000:.2f}分钟"
        elif ms >= 1000:  # >= 1 second
            return f"{ms/1000:.2f}秒"
        else:
            # Bug fix: the integral branch previously omitted the "毫秒" unit,
            # producing e.g. "500" instead of "500毫秒" (while 500.5 correctly
            # rendered "500.50毫秒"). Both branches now carry the unit.
            return f"{ms:.0f}毫秒" if ms == int(ms) else f"{ms:.2f}毫秒"
    except (ValueError, TypeError):
        return str(ms)
|
| 85 |
+
|
| 86 |
+
def _update_usage_statistics(
    config_inst,
    request_id: str,
    requested_model_name: str,
    account_email: Optional[str],
    is_success: bool,
    duration_ms: int,
    is_stream: bool,
    prompt_tokens_val: int,
    completion_tokens_val: int,
    total_tokens_val: int,
    prompt_length: Optional[int] = None,
    completion_length: Optional[int] = None,
    error_message: Optional[str] = None,
    used_actual_tokens_for_history: bool = False
):
    """Record one finished request in the shared usage statistics.

    Updates the aggregate counters (per model / account / day / hour), the
    token totals, and appends an entry to the bounded request history. All
    mutation happens under config_inst.usage_stats_lock.

    Args:
        config_inst: shared config object holding usage_stats and its lock.
        request_id: unique request id, stored in the history entry.
        requested_model_name: model name as requested by the client.
        account_email: OnDemand account used; None is recorded as "unknown_account".
        is_success: whether the request completed successfully.
        duration_ms: wall-clock duration of the request in milliseconds.
        is_stream: whether the request used streaming.
        prompt_tokens_val: prompt token count.
        completion_tokens_val: completion token count (may be an estimate for streams).
        total_tokens_val: total token count (may be an estimate for streams).
        prompt_length: optional prompt character length (recorded on success).
        completion_length: optional completion character length (recorded on success).
        error_message: failure reason recorded in the history entry.
        used_actual_tokens_for_history: for streams, True when the token values
            are actual counts rather than estimates.
    """
    with config_inst.usage_stats_lock:
        config_inst.usage_stats["total_requests"] += 1

        current_email_for_stats = account_email if account_email else "unknown_account"

        if is_success:
            config_inst.usage_stats["successful_requests"] += 1
            # setdefault keeps these updates safe even if the counters are
            # plain dicts (usage_stats normally uses defaultdict(int)).
            config_inst.usage_stats["model_usage"].setdefault(requested_model_name, 0)
            config_inst.usage_stats["model_usage"][requested_model_name] += 1

            config_inst.usage_stats["account_usage"].setdefault(current_email_for_stats, 0)
            config_inst.usage_stats["account_usage"][current_email_for_stats] += 1

            config_inst.usage_stats["total_prompt_tokens"] += prompt_tokens_val
            config_inst.usage_stats["total_completion_tokens"] += completion_tokens_val
            config_inst.usage_stats["total_tokens"] += total_tokens_val
            config_inst.usage_stats["model_tokens"].setdefault(requested_model_name, 0)
            config_inst.usage_stats["model_tokens"][requested_model_name] += total_tokens_val

            # Bucket keys for the per-day and per-hour counters.
            today = datetime.now().strftime("%Y-%m-%d")
            hour = datetime.now().strftime("%Y-%m-%d %H:00")

            config_inst.usage_stats["daily_usage"].setdefault(today, 0)
            config_inst.usage_stats["daily_usage"][today] += 1

            config_inst.usage_stats["hourly_usage"].setdefault(hour, 0)
            config_inst.usage_stats["hourly_usage"][hour] += 1

            config_inst.usage_stats["daily_tokens"].setdefault(today, 0)
            config_inst.usage_stats["daily_tokens"][today] += total_tokens_val

            config_inst.usage_stats["hourly_tokens"].setdefault(hour, 0)
            config_inst.usage_stats["hourly_tokens"][hour] += total_tokens_val
        else:
            config_inst.usage_stats["failed_requests"] += 1

        # Base history entry; token fields are added below depending on outcome.
        history_entry = {
            "id": request_id,
            "timestamp": datetime.now().isoformat(),
            "model": requested_model_name,
            "account": current_email_for_stats,
            "success": is_success,
            "duration_ms": duration_ms,
            "stream": is_stream,
        }

        if is_success:
            if prompt_length is not None:
                history_entry["prompt_length"] = prompt_length
            if completion_length is not None:
                history_entry["completion_length"] = completion_length

            if is_stream:
                if used_actual_tokens_for_history:
                    # Streaming with real usage figures from upstream.
                    history_entry["prompt_tokens"] = prompt_tokens_val
                    history_entry["completion_tokens"] = completion_tokens_val
                    history_entry["total_tokens"] = total_tokens_val
                else:
                    # Streaming without final usage info: completion/total are
                    # stored under estimate-labelled keys.
                    history_entry["prompt_tokens"] = prompt_tokens_val
                    history_entry["estimated_completion_tokens"] = completion_tokens_val
                    history_entry["estimated_total_tokens"] = total_tokens_val
            else:
                history_entry["prompt_tokens"] = prompt_tokens_val
                history_entry["completion_tokens"] = completion_tokens_val
                history_entry["total_tokens"] = total_tokens_val
        else:
            if error_message:
                history_entry["error"] = error_message
            if prompt_tokens_val > 0:
                history_entry["prompt_tokens_attempted"] = prompt_tokens_val

        config_inst.usage_stats["request_history"].append(history_entry)
        # Trim history to the configured bound.
        max_history_items = config_inst.get('max_history_items', 1000)
        if len(config_inst.usage_stats["request_history"]) > max_history_items:
            config_inst.usage_stats["request_history"] = \
                config_inst.usage_stats["request_history"][-max_history_items:]
|
| 180 |
+
def _generate_hash_for_full_history(full_messages_list: List[Dict[str, str]], req_id: str) -> Optional[str]:
|
| 181 |
+
"""
|
| 182 |
+
Generates a SHA256 hash from a list of messages, considering all messages.
|
| 183 |
+
"""
|
| 184 |
+
if not full_messages_list:
|
| 185 |
+
logger.debug(f"[{req_id}] (_generate_hash_for_full_history) No messages to hash.")
|
| 186 |
+
return None
|
| 187 |
+
try:
|
| 188 |
+
# Ensure consistent serialization for hashing
|
| 189 |
+
# Context meaning is only in role and content
|
| 190 |
+
simplified_history = [{"role": msg.get("role"), "content": msg.get("content")} for msg in full_messages_list]
|
| 191 |
+
serialized_history = json.dumps(simplified_history, sort_keys=True)
|
| 192 |
+
return hashlib.sha256(serialized_history.encode('utf-8')).hexdigest()
|
| 193 |
+
except (TypeError, ValueError) as e:
|
| 194 |
+
logger.error(f"[{req_id}] (_generate_hash_for_full_history) Failed to serialize full history messages for hashing: {e}")
|
| 195 |
+
return None
|
| 196 |
+
|
| 197 |
+
def _update_client_context_hash_after_reply(
    original_request_messages: List[Dict[str, str]],
    assistant_reply_content: str,
    request_id: str,
    user_identifier: str, # Corresponds to 'token' in chat_completions
    email_for_stats: Optional[str],
    current_ondemand_client_instance: Optional[OnDemandAPIClient],
    config_inst: config.Config,
    logger_instance # Pass logger directly
):
    """
    Helper to update the client's active_context_hash after a successful reply
    using the full conversation history up to the assistant's reply.

    Args:
        original_request_messages: The ``messages`` list exactly as received in
            the incoming request (i.e. before the assistant reply existed).
        assistant_reply_content: Text of the assistant's reply; empty/falsy
            aborts the update.
        request_id: Request id used only for log correlation.
        user_identifier: Key into ``config_inst.client_sessions`` (the caller's
            API token).
        email_for_stats: OnDemand account email under which the session is
            stored; ``None`` aborts the update.
        current_ondemand_client_instance: The client that actually served this
            request; the session entry is only updated if it still holds this
            same client.
        config_inst: Config object owning ``client_sessions`` and
            ``client_sessions_lock``.
        logger_instance: Logger to emit diagnostics through.

    Returns:
        None. Side effect: rewrites ``active_context_hash`` and ``last_time``
        of the matching session entry, under ``client_sessions_lock``.
    """
    # Guard: all three pieces are required to identify and update the session.
    if not assistant_reply_content or not email_for_stats or not current_ondemand_client_instance:
        logger_instance.debug(f"[{request_id}] 更新客户端上下文哈希的条件不足(回复内容 '{bool(assistant_reply_content)}', 邮箱 '{email_for_stats}', 客户端实例 '{bool(current_ondemand_client_instance)}'),跳过。")
        return

    assistant_message = {"role": "assistant", "content": assistant_reply_content}
    # original_request_messages should be the messages list as it was when the request came in.
    full_history_up_to_assistant_reply = original_request_messages + [assistant_message]

    # Hash of request-history + assistant reply: this is the context hash the
    # NEXT request in this conversation is expected to present.
    next_active_context_hash = _generate_hash_for_full_history(full_history_up_to_assistant_reply, request_id)

    if next_active_context_hash:
        # All reads/writes of client_sessions happen under the shared lock.
        with config_inst.client_sessions_lock:
            if user_identifier in config_inst.client_sessions and \
               email_for_stats in config_inst.client_sessions[user_identifier]:

                session_data_to_update = config_inst.client_sessions[user_identifier][email_for_stats]
                client_in_session = session_data_to_update.get("client")

                # DEBUGGING LOGS START
                logger_instance.debug(f"[{request_id}] HASH_UPDATE_DEBUG: client_in_session id={id(client_in_session)}, email={getattr(client_in_session, 'email', 'N/A')}, session_id={getattr(client_in_session, 'session_id', 'N/A')}")
                logger_instance.debug(f"[{request_id}] HASH_UPDATE_DEBUG: current_ondemand_client_instance id={id(current_ondemand_client_instance)}, email={getattr(current_ondemand_client_instance, 'email', 'N/A')}, session_id={getattr(current_ondemand_client_instance, 'session_id', 'N/A')}")
                logger_instance.debug(f"[{request_id}] HASH_UPDATE_DEBUG: Comparison result (client_in_session == current_ondemand_client_instance): {client_in_session == current_ondemand_client_instance}")
                logger_instance.debug(f"[{request_id}] HASH_UPDATE_DEBUG: Comparison result (client_in_session is current_ondemand_client_instance): {client_in_session is current_ondemand_client_instance}")
                # DEBUGGING LOGS END

                # NOTE(review): uses `==` (not `is`) — relies on OnDemandAPIClient
                # equality semantics; the debug logs above track both comparisons.
                if client_in_session == current_ondemand_client_instance:
                    old_hash = session_data_to_update.get("active_context_hash")
                    session_data_to_update["active_context_hash"] = next_active_context_hash
                    session_data_to_update["last_time"] = datetime.now()
                    logger_instance.info(f"[{request_id}] 客户端 (账户: {email_for_stats}) 的 active_context_hash 已从 '{old_hash}' 更新为 '{next_active_context_hash}' 以反映对话进展。")
                else:
                    # Stored client differs from the one that served the request:
                    # do not clobber another request's session state.
                    logger_instance.warning(f"[{request_id}] 尝试更新哈希时,发现 email_for_stats '{email_for_stats}' 对应的存储客户端与当前使用的 ondemand_client 不一致。跳过更新。")
            else:
                # Session entry disappeared (e.g. cleaned up) between serving
                # the request and this update; nothing to write.
                logger_instance.warning(f"[{request_id}] 尝试更新哈希时,在 client_sessions 中未找到用户 '{user_identifier}' 或账户 '{email_for_stats}'。跳过更新。")
    else:
        # Hash generation failed (empty/unserializable history); keep old hash.
        logger_instance.warning(f"[{request_id}] 未能为下一次交互生成新的 active_context_hash (基于回复 '{bool(assistant_reply_content)}'). 客户端的哈希未更新。")
def _get_context_key_from_messages(messages: List[Dict[str, str]], req_id: str) -> Optional[str]:
|
| 249 |
+
"""
|
| 250 |
+
从末次用户消息前的消息列表生成上下文哈希密钥。
|
| 251 |
+
"""
|
| 252 |
+
if not messages:
|
| 253 |
+
logger.debug(f"[{req_id}] 无消息可供生成上下文密钥。")
|
| 254 |
+
return None
|
| 255 |
+
|
| 256 |
+
last_user_msg_idx = -1
|
| 257 |
+
for i in range(len(messages) - 1, -1, -1):
|
| 258 |
+
if messages[i].get('role') == 'user':
|
| 259 |
+
last_user_msg_idx = i
|
| 260 |
+
break
|
| 261 |
+
|
| 262 |
+
# 若无用户消息或用户消息为首条,则无先前历史可生成上下文密钥。
|
| 263 |
+
if last_user_msg_idx <= 0:
|
| 264 |
+
logger.debug(f"[{req_id}] 无先前历史可生成上下文密钥 (last_user_msg_idx: {last_user_msg_idx})。")
|
| 265 |
+
return None
|
| 266 |
+
|
| 267 |
+
historical_messages = messages[:last_user_msg_idx]
|
| 268 |
+
if not historical_messages: # 应由 last_user_msg_idx <= 0 捕获,此处为额外保障
|
| 269 |
+
logger.debug(f"[{req_id}] 上下文密钥的历史消息列表为空。")
|
| 270 |
+
return None
|
| 271 |
+
|
| 272 |
+
try:
|
| 273 |
+
# 确保哈希序列化的一致性
|
| 274 |
+
# 上下文意义仅关注角色和内容
|
| 275 |
+
simplified_history = [{"role": msg.get("role"), "content": msg.get("content")} for msg in historical_messages]
|
| 276 |
+
serialized_history = json.dumps(simplified_history, sort_keys=True)
|
| 277 |
+
return hashlib.sha256(serialized_history.encode('utf-8')).hexdigest()
|
| 278 |
+
except (TypeError, ValueError) as e:
|
| 279 |
+
logger.error(f"[{req_id}] 序列化历史消息以生成上下文密钥失败: {e}")
|
| 280 |
+
return None
|
| 281 |
+
|
| 282 |
+
def register_routes(app):
|
| 283 |
+
"""注册所有路由到Flask应用"""
|
| 284 |
+
|
| 285 |
+
# 注册自定义过滤器
|
| 286 |
+
app.jinja_env.filters['format_datetime'] = format_datetime
|
| 287 |
+
app.jinja_env.filters['format_number'] = format_number
|
| 288 |
+
app.jinja_env.filters['format_duration'] = format_duration
|
| 289 |
+
|
| 290 |
+
@app.route('/health', methods=['GET'])
|
| 291 |
+
def health_check():
|
| 292 |
+
"""健康检查端点,返回服务状态"""
|
| 293 |
+
return {"status": "ok", "message": "2API服务运行正常"}, 200
|
| 294 |
+
|
| 295 |
+
@app.route('/v1/models', methods=['GET'])
|
| 296 |
+
def list_models():
|
| 297 |
+
"""以 OpenAI 格式返回可用模型列表。"""
|
| 298 |
+
data = []
|
| 299 |
+
# 获取当前时间戳,用于 'created' 字段
|
| 300 |
+
created_time = int(time.time())
|
| 301 |
+
model_mapping = config_instance._model_mapping
|
| 302 |
+
for openai_name in model_mapping.keys(): # 仅列出已映射的模型
|
| 303 |
+
data.append({
|
| 304 |
+
"id": openai_name,
|
| 305 |
+
"object": "model",
|
| 306 |
+
"created": created_time,
|
| 307 |
+
"owned_by": "on-demand.io" # 或根据模型来源填写 "openai", "anthropic" 等
|
| 308 |
+
})
|
| 309 |
+
return {"object": "list", "data": data}
|
| 310 |
+
|
| 311 |
+
@app.route('/v1/chat/completions', methods=['POST'])
|
| 312 |
+
def chat_completions():
|
| 313 |
+
"""处理聊天补全请求,兼容 OpenAI 格式。"""
|
| 314 |
+
request_id = generate_request_id() # 生成唯一的请求 ID
|
| 315 |
+
logger.info(f"[{request_id}] CHAT_COMPLETIONS_ENTRY_POINT") # 最早的日志点
|
| 316 |
+
client_ip = request.remote_addr # 获取客户端 IP 地址,仅用于日志记录
|
| 317 |
+
logger.info(f"[{request_id}] 收到来自 IP: {client_ip} 的 /v1/chat/completions 请求")
|
| 318 |
+
|
| 319 |
+
# 尝试在更早的位置打印一些调试信息
|
| 320 |
+
logger.info(f"[{request_id}] DEBUG_ENTRY: 进入 chat_completions。")
|
| 321 |
+
|
| 322 |
+
# 验证访问令牌
|
| 323 |
+
auth_header = request.headers.get('Authorization')
|
| 324 |
+
if not auth_header or not auth_header.startswith('Bearer '):
|
| 325 |
+
logger.warning(f"[{request_id}] 未提供认证令牌或格式错误")
|
| 326 |
+
return {"error": {"message": "缺少有效的认证令牌", "type": "auth_error", "code": "missing_token"}}, 401
|
| 327 |
+
|
| 328 |
+
# 获取API访问令牌
|
| 329 |
+
api_access_token = config_instance.get('api_access_token')
|
| 330 |
+
token = auth_header[7:] # 去掉 'Bearer ' 前缀
|
| 331 |
+
if token != api_access_token:
|
| 332 |
+
logger.warning(f"[{request_id}] 提供了无效的认证令牌")
|
| 333 |
+
return {"error": {"message": "无效的认证令牌", "type": "auth_error", "code": "invalid_token"}}, 401
|
| 334 |
+
|
| 335 |
+
# 检查速率限制 - 使用token而不是IP进行限制
|
| 336 |
+
if not rate_limiter.is_allowed(token):
|
| 337 |
+
logger.warning(f"[{request_id}] 用户 {token[:8]}... 超过速率限制")
|
| 338 |
+
return {"error": {"message": "请求频率过高,请稍后再试", "type": "rate_limit_error", "code": "rate_limit_exceeded"}}, 429
|
| 339 |
+
|
| 340 |
+
openai_data = request.get_json()
|
| 341 |
+
if not openai_data:
|
| 342 |
+
logger.error(f"[{request_id}] 请求体不是有效的JSON")
|
| 343 |
+
return {"error": {"message": "请求体必须是 JSON。", "type": "invalid_request_error", "code": None}}, 400
|
| 344 |
+
|
| 345 |
+
if app.config.get('DEBUG_MODE', False):
|
| 346 |
+
logger.debug(f"[{request_id}] OpenAI 请求数据: {json.dumps(openai_data, indent=2, ensure_ascii=False)}")
|
| 347 |
+
|
| 348 |
+
# 从 OpenAI 请求中提取参数
|
| 349 |
+
# Capture the initial messages from the request for later use in rolling hash update
|
| 350 |
+
initial_messages_from_request: List[Dict[str, str]] = openai_data.get('messages', [])
|
| 351 |
+
messages: List[Dict[str, str]] = initial_messages_from_request # Keep 'messages' for existing logic
|
| 352 |
+
stream_requested: bool = openai_data.get('stream', False)
|
| 353 |
+
# 如果请求中没有指定模型,则使用映射表中的一个默认模型,或者最终的 DEFAULT_ENDPOINT_ID
|
| 354 |
+
model_mapping = config_instance._model_mapping
|
| 355 |
+
default_endpoint_id = config_instance.get('default_endpoint_id')
|
| 356 |
+
requested_model_name: str = openai_data.get('model', list(model_mapping.keys())[0] if model_mapping else default_endpoint_id)
|
| 357 |
+
|
| 358 |
+
# 从请求中获取参数,如果未提供则为 None
|
| 359 |
+
temperature: Optional[float] = openai_data.get('temperature')
|
| 360 |
+
max_tokens: Optional[int] = openai_data.get('max_tokens')
|
| 361 |
+
top_p: Optional[float] = openai_data.get('top_p')
|
| 362 |
+
frequency_penalty: Optional[float] = openai_data.get('frequency_penalty')
|
| 363 |
+
presence_penalty: Optional[float] = openai_data.get('presence_penalty')
|
| 364 |
+
|
| 365 |
+
if not messages:
|
| 366 |
+
logger.error(f"[{request_id}] 缺少 'messages' 字段")
|
| 367 |
+
return {"error": {"message": "缺少 'messages' 字段。", "type": "invalid_request_error", "code": "missing_messages"}}, 400
|
| 368 |
+
|
| 369 |
+
# 为 on-demand.io 构建查询
|
| 370 |
+
# on-demand.io 通常接受单个查询字符串,上下文由其会话管理。
|
| 371 |
+
# 我们将发送最新的用户查询,可选地以系统提示为前缀。
|
| 372 |
+
# --- 上下文感知会话管理与查询构建 (v2) ---
|
| 373 |
+
|
| 374 |
+
# 1. 提取消息组件与上下文密钥
|
| 375 |
+
logger.info(f"[{request_id}] DEBUG_PRE_HASH_COMPUTATION: 即将计算 request_context_hash。")
|
| 376 |
+
request_context_hash = _get_context_key_from_messages(messages, request_id)
|
| 377 |
+
logger.info(f"[{request_id}] 请求上下文哈希值: {repr(request_context_hash)}") # 使用 repr()
|
| 378 |
+
|
| 379 |
+
logger.info(f"[{request_id}] DEBUG_POINT_A: 即将初始化 historical_messages。")
|
| 380 |
+
historical_messages = []
|
| 381 |
+
logger.info(f"[{request_id}] DEBUG_POINT_B: historical_messages 初始化为空列表。即将检查 request_context_hash ({repr(request_context_hash)}).")
|
| 382 |
+
|
| 383 |
+
if request_context_hash: # 注意:空字符串的布尔值为 False
|
| 384 |
+
logger.info(f"[{request_id}] DEBUG_POINT_C: request_context_hash ({repr(request_context_hash)}) 为真,进入历史提取块。")
|
| 385 |
+
last_user_idx = -1
|
| 386 |
+
try:
|
| 387 |
+
for i in range(len(messages) - 1, -1, -1):
|
| 388 |
+
if messages[i].get('role') == 'user': last_user_idx = i; break
|
| 389 |
+
except Exception as e_loop:
|
| 390 |
+
logger.error(f"[{request_id}] DEBUG_LOOP_ERROR: 在查找 last_user_idx 的循环中发生错误: {e_loop}")
|
| 391 |
+
last_user_idx = -1 # 确保安全
|
| 392 |
+
|
| 393 |
+
logger.info(f"[{request_id}] DEBUG_POINT_D: last_user_idx = {last_user_idx}")
|
| 394 |
+
if last_user_idx > 0:
|
| 395 |
+
try:
|
| 396 |
+
historical_messages = messages[:last_user_idx]
|
| 397 |
+
logger.info(f"[{request_id}] DEBUG_POINT_E: historical_messages 赋值自 messages[:{last_user_idx}]")
|
| 398 |
+
except Exception as e_slice:
|
| 399 |
+
logger.error(f"[{request_id}] DEBUG_SLICE_ERROR: 在切片 messages[:{last_user_idx}] 时发生错误: {e_slice}")
|
| 400 |
+
historical_messages = [] # 确保安全
|
| 401 |
+
|
| 402 |
+
if historical_messages:
|
| 403 |
+
logger.info(f"[{request_id}] DEBUG_HISTORICAL_CONTENT: 'historical_messages' 提取后内容: {json.dumps(historical_messages, ensure_ascii=False, indent=2)}")
|
| 404 |
+
else:
|
| 405 |
+
logger.info(f"[{request_id}] DEBUG_HISTORICAL_EMPTY: 'historical_messages' 提取后为空列表。last_user_idx={last_user_idx}, request_context_hash='{request_context_hash}'")
|
| 406 |
+
|
| 407 |
+
elif not request_context_hash: # request_context_hash is None or empty string
|
| 408 |
+
logger.info(f"[{request_id}] DEBUG_HISTORICAL_NOHASH: 'request_context_hash' ({repr(request_context_hash)}) 为假, 'historical_messages' 保持为空列表。")
|
| 409 |
+
|
| 410 |
+
logger.info(f"[{request_id}] DEBUG_POST_HISTORICAL_EXTRACTION: 即将提取 system 和 user query。")
|
| 411 |
+
current_system_prompts_contents = [msg['content'] for msg in messages if msg.get('role') == 'system' and msg.get('content')]
|
| 412 |
+
system_prompt_combined = "\n".join(current_system_prompts_contents)
|
| 413 |
+
|
| 414 |
+
current_user_messages_contents = [msg['content'] for msg in messages if msg.get('role') == 'user' and msg.get('content')]
|
| 415 |
+
current_user_query = current_user_messages_contents[-1] if current_user_messages_contents else ""
|
| 416 |
+
|
| 417 |
+
if not current_user_query: # 此检查至关重要
|
| 418 |
+
logger.error(f"[{request_id}] 'messages' 中未找到有效的 'user' 角色的消息内容。")
|
| 419 |
+
# 记录调试消息
|
| 420 |
+
logger.debug(f"[{request_id}] 接收到的消息: {json.dumps(messages, ensure_ascii=False)}")
|
| 421 |
+
return {"error": {"message": "'messages' 中未找到有效的 'user' 角色的消息内容。", "type": "invalid_request_error", "code": "no_user_message"}}, 400
|
| 422 |
+
|
| 423 |
+
user_identifier = token
|
| 424 |
+
# 记录请求开始时间,确保在所有路径中 duration_ms 可用
|
| 425 |
+
request_start_time = time.time()
|
| 426 |
+
ondemand_client = None
|
| 427 |
+
email_for_stats = None # 此为 OnDemandAPIClient 所用账户的邮箱
|
| 428 |
+
# 初始化 is_newly_assigned_context,默认为 True,如果后续阶段匹配成功会被修改
|
| 429 |
+
is_newly_assigned_context = True
|
| 430 |
+
|
| 431 |
+
# 获取会话超时配置
|
| 432 |
+
ondemand_session_timeout_minutes = config_instance.get('ondemand_session_timeout_minutes', 30)
|
| 433 |
+
logger.info(f"[{request_id}] OnDemand 会话超时设置为: {ondemand_session_timeout_minutes} 分钟。")
|
| 434 |
+
# 将分钟转换为 timedelta 对象,便于比较
|
| 435 |
+
session_timeout_delta = timedelta(minutes=ondemand_session_timeout_minutes)
|
| 436 |
+
|
| 437 |
+
with config_instance.client_sessions_lock:
|
| 438 |
+
current_time_dt = datetime.now() # 使用 datetime 对象进行比较
|
| 439 |
+
if user_identifier not in config_instance.client_sessions:
|
| 440 |
+
config_instance.client_sessions[user_identifier] = {}
|
| 441 |
+
user_sessions_for_id = config_instance.client_sessions[user_identifier]
|
| 442 |
+
|
| 443 |
+
# 阶段 0: 优先复用“活跃”会话
|
| 444 |
+
# 遍历时按 last_time 降序排列,优先选择最近使用的活跃会话
|
| 445 |
+
sorted_sessions = sorted(
|
| 446 |
+
user_sessions_for_id.items(),
|
| 447 |
+
key=lambda item: item[1].get("last_time", datetime.min),
|
| 448 |
+
reverse=True
|
| 449 |
+
)
|
| 450 |
+
|
| 451 |
+
for acc_email_p0, session_data_p0 in sorted_sessions:
|
| 452 |
+
client_p0 = session_data_p0.get("client")
|
| 453 |
+
last_time_p0 = session_data_p0.get("last_time")
|
| 454 |
+
|
| 455 |
+
if client_p0 and client_p0.token and client_p0.session_id and last_time_p0:
|
| 456 |
+
if (current_time_dt - last_time_p0) < session_timeout_delta: # 使用 session_timeout_delta
|
| 457 |
+
stored_active_hash = session_data_p0.get("active_context_hash")
|
| 458 |
+
hash_match_status = "匹配" if stored_active_hash == request_context_hash else "不匹配"
|
| 459 |
+
logger.info(f"[{request_id}] 阶段0: 找到账户 {acc_email_p0} 的活跃会话。请求上下文哈希 ({request_context_hash or 'None'}) 与存储哈希 ({stored_active_hash or 'None'}) {hash_match_status}。")
|
| 460 |
+
|
| 461 |
+
# 新增:检查上下文哈希是否匹配
|
| 462 |
+
if stored_active_hash == request_context_hash:
|
| 463 |
+
# 如果哈希匹配,则复用此客户端
|
| 464 |
+
ondemand_client = client_p0
|
| 465 |
+
email_for_stats = acc_email_p0
|
| 466 |
+
ondemand_client._associated_user_identifier = user_identifier
|
| 467 |
+
ondemand_client._associated_request_ip = client_ip
|
| 468 |
+
session_data_p0["last_time"] = current_time_dt # 使用 current_time_dt
|
| 469 |
+
session_data_p0["ip"] = client_ip
|
| 470 |
+
is_newly_assigned_context = False # 复用现有活跃会话
|
| 471 |
+
logger.info(f"[{request_id}] 阶段0: 上下文哈希匹配,复用账户 {email_for_stats} 的活跃会话。")
|
| 472 |
+
break # 已找到并复用活跃客户端
|
| 473 |
+
else:
|
| 474 |
+
logger.info(f"[{request_id}] 阶段0: 上下文哈希不匹配,跳过复用此活跃会话。")
|
| 475 |
+
# Continue the loop to check other sessions or proceed to Stage 1
|
| 476 |
+
|
| 477 |
+
# 阶段 1: 若阶段0失败,则查找已服务此 context_hash 的客户端 (精确哈希匹配)
|
| 478 |
+
if not ondemand_client and request_context_hash: # 只有在 request_context_hash 存在时才进行阶段1匹配
|
| 479 |
+
for acc_email_p1, session_data_p1 in user_sessions_for_id.items(): # 无需再次排序,因为阶段0已处理最优选择
|
| 480 |
+
client_p1 = session_data_p1.get("client")
|
| 481 |
+
if client_p1 and client_p1.token and client_p1.session_id and \
|
| 482 |
+
session_data_p1.get("active_context_hash") == request_context_hash:
|
| 483 |
+
|
| 484 |
+
# 检查此精确匹配的会话是否也“活跃”,如果不活跃,可能不如创建一个新的
|
| 485 |
+
last_time_p1 = session_data_p1.get("last_time")
|
| 486 |
+
if last_time_p1 and (current_time_dt - last_time_p1) >= session_timeout_delta: # 使用 session_timeout_delta
|
| 487 |
+
logger.info(f"[{request_id}] 阶段1: 找到精确哈希匹配的账户 {acc_email_p1},但其会话已超时。将跳过并尝试创建新会话。")
|
| 488 |
+
continue # 跳过这个超时的精确匹配
|
| 489 |
+
|
| 490 |
+
ondemand_client = client_p1
|
| 491 |
+
email_for_stats = acc_email_p1
|
| 492 |
+
ondemand_client._associated_user_identifier = user_identifier
|
| 493 |
+
ondemand_client._associated_request_ip = client_ip
|
| 494 |
+
session_data_p1["last_time"] = current_time_dt # 使用 current_time_dt
|
| 495 |
+
session_data_p1["ip"] = client_ip
|
| 496 |
+
is_newly_assigned_context = False # 精确上下文匹配
|
| 497 |
+
logger.info(f"[{request_id}] 阶段1: 上下文精确匹配。复用账户 {email_for_stats} 的客户端 (上下文哈希: {request_context_hash})。")
|
| 498 |
+
break # 已找到客户端
|
| 499 |
+
|
| 500 |
+
# 阶段 2: 若阶段0和阶段1均失败,则必须创建新客户端会话
|
| 501 |
+
if not ondemand_client:
|
| 502 |
+
logger.info(f"[{request_id}] 阶段0及阶段1均未找到可复用会话 (请求上下文哈希: {request_context_hash or 'None'})。尝试获取/创建新客户端会话。")
|
| 503 |
+
MAX_ACCOUNT_ATTEMPTS = config_instance.get('max_account_attempts', 3) # 从配置获取或默认3
|
| 504 |
+
for attempt in range(MAX_ACCOUNT_ATTEMPTS):
|
| 505 |
+
new_ondemand_email, new_ondemand_password = config.get_next_ondemand_account_details()
|
| 506 |
+
if not new_ondemand_email:
|
| 507 |
+
logger.error(f"[{request_id}] 尝试 {attempt+1} 次后,配置中无可用 OnDemand 账户。")
|
| 508 |
+
break
|
| 509 |
+
|
| 510 |
+
email_for_stats = new_ondemand_email # 本次尝试暂设值
|
| 511 |
+
|
| 512 |
+
# 检查 user_identifier 是否已对 new_ondemand_email 存在会话数据,但可能 client 实例需要重建
|
| 513 |
+
# 或者这是一个全新的账户分配给此 user_identifier
|
| 514 |
+
|
| 515 |
+
# 总是尝试创建新的 OnDemandAPIClient 实例和新的 OnDemand session_id
|
| 516 |
+
# 因为到这一步意味着我们没有找到合适的现有活跃会话来复用其 session_id
|
| 517 |
+
logger.info(f"[{request_id}] 阶段2: 为账户 {new_ondemand_email} 创建新客户端实例和会话 (尝试 {attempt+1})。")
|
| 518 |
+
client_id_for_log = f"{user_identifier[:8]}-{new_ondemand_email.split('@')[0]}-{request_id[:4]}" # 更具区分度的 client_id
|
| 519 |
+
temp_ondemand_client = OnDemandAPIClient(new_ondemand_email, new_ondemand_password, client_id=client_id_for_log)
|
| 520 |
+
|
| 521 |
+
if not temp_ondemand_client.sign_in() or not temp_ondemand_client.create_session():
|
| 522 |
+
logger.error(f"[{request_id}] 为 {new_ondemand_email} 初始化新客户端会话失败: {temp_ondemand_client.last_error}")
|
| 523 |
+
# 此处不将 ondemand_client 设为 None,因为 email_for_stats 需要在失败统计时使用
|
| 524 |
+
# email_for_stats = None # 移除,以确保失败统计时有邮箱
|
| 525 |
+
continue # 尝试下一账户
|
| 526 |
+
|
| 527 |
+
ondemand_client = temp_ondemand_client # 成功创建,赋值
|
| 528 |
+
ondemand_client._associated_user_identifier = user_identifier
|
| 529 |
+
ondemand_client._associated_request_ip = client_ip
|
| 530 |
+
|
| 531 |
+
user_sessions_for_id[new_ondemand_email] = {
|
| 532 |
+
"client": ondemand_client,
|
| 533 |
+
"last_time": current_time_dt, # 使用 current_time_dt
|
| 534 |
+
"ip": client_ip,
|
| 535 |
+
"active_context_hash": request_context_hash # 新会话关联到当前请求的上下文哈希
|
| 536 |
+
}
|
| 537 |
+
is_newly_assigned_context = True # 这是一个新的 OnDemand 会话,或者为现有账户分配了新的上下文
|
| 538 |
+
logger.info(f"[{request_id}] 阶段2: 已为账户 {email_for_stats} 成功创建/分配新客户端会话 (is_newly_assigned_context=True, 关联上下文哈希: {request_context_hash or 'None'})。")
|
| 539 |
+
break # 跳出账户尝试循环,客户端就绪
|
| 540 |
+
|
| 541 |
+
if not ondemand_client: # 获取/创建客户端尝试均失败
|
| 542 |
+
# is_newly_assigned_context 此时应保持为 True (其默认值)
|
| 543 |
+
last_attempt_error = temp_ondemand_client.last_error if 'temp_ondemand_client' in locals() and temp_ondemand_client else '未知错误'
|
| 544 |
+
logger.error(f"[{request_id}] 尝试 {MAX_ACCOUNT_ATTEMPTS} 次后获取/创建客户端失败 (is_newly_assigned_context 保持为 {is_newly_assigned_context})。最后一次尝试失败原因: {last_attempt_error}")
|
| 545 |
+
|
| 546 |
+
prompt_tok_val_err, _, _ = count_message_tokens(messages, requested_model_name)
|
| 547 |
+
_update_usage_statistics(
|
| 548 |
+
config_inst=config_instance, request_id=request_id, requested_model_name=requested_model_name,
|
| 549 |
+
account_email=email_for_stats, # 可能为最后尝试的邮箱或None
|
| 550 |
+
is_success=False, duration_ms=int((time.time() - request_start_time) * 1000), # request_start_time 可能未定义
|
| 551 |
+
is_stream=stream_requested, prompt_tokens_val=prompt_tok_val_err or 0,
|
| 552 |
+
completion_tokens_val=0, total_tokens_val=prompt_tok_val_err or 0,
|
| 553 |
+
error_message=f"多次尝试后获取/创建客户端会话失败。最后一次尝试失败原因: {last_attempt_error}"
|
| 554 |
+
)
|
| 555 |
+
return {"error": {"message": f"当前无法与 OnDemand 服务建立会话。最后一次尝试失败原因: {last_attempt_error}", "type": "api_error", "code": "ondemand_session_unavailable"}}, 503
|
| 556 |
+
|
| 557 |
+
# --- 会话管理结束 ---
|
| 558 |
+
|
| 559 |
+
# 4. 基于 is_newly_assigned_context 构建 final_query_to_ondemand
|
| 560 |
+
final_query_to_ondemand = ""
|
| 561 |
+
query_parts = []
|
| 562 |
+
|
| 563 |
+
# 在构建查询之前,记录关键变量的状态
|
| 564 |
+
logger.debug(f"[{request_id}] 查询构建前状态:is_newly_assigned_context={is_newly_assigned_context}, request_context_hash='{request_context_hash}', historical_messages_empty={not bool(historical_messages)}")
|
| 565 |
+
if historical_messages: # 只在列表非空时尝试序列化
|
| 566 |
+
logger.debug(f"[{request_id}] 查询构建前状态:historical_messages 内容: {json.dumps(historical_messages, ensure_ascii=False, indent=2)}")
|
| 567 |
+
else:
|
| 568 |
+
logger.debug(f"[{request_id}] 查询构建前状态:historical_messages 为空列表。")
|
| 569 |
+
|
| 570 |
+
if is_newly_assigned_context:
|
| 571 |
+
# 阶段2:新建/重分配会话
|
| 572 |
+
logger.info(f"[{request_id}] 查询构建:会话为新建/重分配 (is_newly_assigned_context=True, 账户: {email_for_stats})。")
|
| 573 |
+
|
| 574 |
+
# 在新建会话时,如果存在系统提示,则添加到 query_parts
|
| 575 |
+
if system_prompt_combined:
|
| 576 |
+
query_parts.append(f"System: {system_prompt_combined}")
|
| 577 |
+
logger.debug(f"[{request_id}] 查询构建:新建会话,添加了合并的系统提示。")
|
| 578 |
+
|
| 579 |
+
if request_context_hash and historical_messages: # 有历史上下文 (historical_messages 已在前面提取)
|
| 580 |
+
logger.info(f"[{request_id}] 查询构建:存在历史上下文 ({request_context_hash}),将发送历史消息。")
|
| 581 |
+
formatted_historical_parts = []
|
| 582 |
+
for msg in historical_messages: # historical_messages 是 messages[:last_user_idx]
|
| 583 |
+
role = msg.get('role', 'unknown').capitalize()
|
| 584 |
+
content = msg.get('content', '')
|
| 585 |
+
if content: formatted_historical_parts.append(f"{role}: {content}")
|
| 586 |
+
if formatted_historical_parts: query_parts.append("\n".join(formatted_historical_parts))
|
| 587 |
+
else: # 无历史上下文 (例如对话首条消息,或 request_context_hash 为 None)
|
| 588 |
+
logger.info(f"[{request_id}] 查询构建:无历史上下文。仅发送当前用户查询。") # 系统提示已在前面处理
|
| 589 |
+
|
| 590 |
+
else:
|
| 591 |
+
# 阶段0或阶段1:复用现有会话
|
| 592 |
+
# 不发送 historical_messages 和 system prompt,信任 OnDemand API 通过 session_id 维护上下文
|
| 593 |
+
stored_active_hash = "N/A"
|
| 594 |
+
if ondemand_client: # ondemand_client 应该总是存在的,除非前面逻辑有误
|
| 595 |
+
# 尝试从 client_sessions 获取最新的哈希,因为 client 实例可能刚被更新
|
| 596 |
+
client_session_data = config_instance.client_sessions.get(user_identifier, {}).get(email_for_stats, {})
|
| 597 |
+
stored_active_hash = client_session_data.get('active_context_hash', 'N/A')
|
| 598 |
+
|
| 599 |
+
hash_match_status = "匹配" if stored_active_hash == request_context_hash else "不匹配"
|
| 600 |
+
logger.info(f"[{request_id}] 查询构建:复用现有会话 (is_newly_assigned_context=False, 账户: {email_for_stats})。不发送历史消息或系统提示。请求上下文哈希 ({request_context_hash or 'None'}) 与存储哈希 ({stored_active_hash or 'None'}) {hash_match_status}。")
|
| 601 |
+
|
| 602 |
+
# 始终添加当前用户查询
|
| 603 |
+
if current_user_query: # current_user_query 是 messages 中最后一个用户消息的内容
|
| 604 |
+
query_parts.append(f"User: {current_user_query}")
|
| 605 |
+
logger.debug(f"[{request_id}] 查询构建:添加了当前用户查询。")
|
| 606 |
+
else: # 此情况应在早期被捕获 (messages 中无 user role)
|
| 607 |
+
logger.error(f"[{request_id}] 严重错误: 最终查询构建时 current_user_query 为空!")
|
| 608 |
+
if not query_parts: query_parts.append(" ") # 确保查询非空
|
| 609 |
+
|
| 610 |
+
final_query_to_ondemand = "\n\n".join(filter(None, query_parts))
|
| 611 |
+
if not final_query_to_ondemand.strip(): # 确保查询字符串实际有内容
|
| 612 |
+
logger.warning(f"[{request_id}] 构建的查询为空或全为空格。发送占位符查询。")
|
| 613 |
+
final_query_to_ondemand = " "
|
| 614 |
+
|
| 615 |
+
logger.info(f"[{request_id}] 构建的 OnDemand 查询 (前1000字符): {final_query_to_ondemand[:1000]}...")
|
| 616 |
+
|
| 617 |
+
# 根据请求的模型名称获取 on-demand.io 的 endpoint_id
|
| 618 |
+
endpoint_id = model_mapping.get(requested_model_name, default_endpoint_id)
|
| 619 |
+
if requested_model_name not in model_mapping:
|
| 620 |
+
logger.warning(f"[{request_id}] 模型 '{requested_model_name}' 不在映射表中, 将使用默认端点 '{default_endpoint_id}'.")
|
| 621 |
+
|
| 622 |
+
# 构建模型配置,只包含用户明确提供的参数
|
| 623 |
+
model_configs = {}
|
| 624 |
+
|
| 625 |
+
# 构建模型配置,只包含用户明确提供的参数 (值为None的参数不会被包含)
|
| 626 |
+
if temperature is not None:
|
| 627 |
+
model_configs["temperature"] = temperature
|
| 628 |
+
if max_tokens is not None:
|
| 629 |
+
model_configs["maxTokens"] = max_tokens
|
| 630 |
+
if top_p is not None:
|
| 631 |
+
model_configs["topP"] = top_p
|
| 632 |
+
if frequency_penalty is not None:
|
| 633 |
+
model_configs["frequency_penalty"] = frequency_penalty
|
| 634 |
+
if presence_penalty is not None:
|
| 635 |
+
model_configs["presence_penalty"] = presence_penalty
|
| 636 |
+
|
| 637 |
+
logger.info(f"[{request_id}] 构建的模型配置: {json.dumps(model_configs, ensure_ascii=False)}")
|
| 638 |
+
|
| 639 |
+
# request_start_time 已移至会话管理之前
|
| 640 |
+
|
| 641 |
+
# 在调用 send_query 之前,将 request_context_hash 存储到 ondemand_client 实例上
|
| 642 |
+
# 以便在 RateLimitStrategy 中进行账户切换时可以访问到它
|
| 643 |
+
if ondemand_client: #确保 ondemand_client 不是 None
|
| 644 |
+
ondemand_client._current_request_context_hash = request_context_hash
|
| 645 |
+
logger.debug(f"[{request_id}] Stored request_context_hash ('{request_context_hash}') onto ondemand_client instance before send_query.")
|
| 646 |
+
else:
|
| 647 |
+
logger.error(f"[{request_id}] CRITICAL: ondemand_client is None before send_query. This should not happen.")
|
| 648 |
+
# 可以在这里决定是否提前返回错误,或者让后续的 send_query 调用失败
|
| 649 |
+
# 为安全起见,如果 ondemand_client 为 None,后续调用会 AttributeError
|
| 650 |
+
|
| 651 |
+
# 使用特定于此 IP 的客户端实例向 OnDemand API 发送查询
|
| 652 |
+
ondemand_result = ondemand_client.send_query(final_query_to_ondemand, endpoint_id=endpoint_id,
|
| 653 |
+
stream=stream_requested, model_configs_input=model_configs)
|
| 654 |
+
|
| 655 |
+
# 处理响应
|
| 656 |
+
if stream_requested:
|
| 657 |
+
# 流式响应
|
| 658 |
+
def generate_openai_stream(captured_initial_request_messages: List[Dict[str, str]]):
|
| 659 |
+
full_assistant_reply_parts = [] # For aggregating streamed reply
|
| 660 |
+
stream_response_obj = ondemand_result.get("response_obj")
|
| 661 |
+
if not stream_response_obj: # 确保 response_obj 存在
|
| 662 |
+
# 计算token数量(仅提示部分,因为流式响应无法准确计算完成tokens)
|
| 663 |
+
prompt_tokens, _, _ = count_message_tokens(messages, requested_model_name)
|
| 664 |
+
# 确保prompt_tokens不为None
|
| 665 |
+
if prompt_tokens is None:
|
| 666 |
+
prompt_tokens = 0
|
| 667 |
+
# 错误情况下,完成tokens为0
|
| 668 |
+
estimated_completion_tokens = 0
|
| 669 |
+
# 错误情况下,总tokens等于提示tokens
|
| 670 |
+
estimated_total_tokens = prompt_tokens
|
| 671 |
+
|
| 672 |
+
error_json = {
|
| 673 |
+
"id": request_id,
|
| 674 |
+
"object": "chat.completion.chunk",
|
| 675 |
+
"created": int(time.time()),
|
| 676 |
+
"model": requested_model_name,
|
| 677 |
+
"choices": [{"delta": {"content": "[流错误:未获取到响应对象]"}, "index": 0, "finish_reason": "error"}],
|
| 678 |
+
"usage": { # 添加token统计信息
|
| 679 |
+
"prompt_tokens": prompt_tokens,
|
| 680 |
+
"completion_tokens": estimated_completion_tokens,
|
| 681 |
+
"total_tokens": estimated_total_tokens
|
| 682 |
+
}
|
| 683 |
+
}
|
| 684 |
+
yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
|
| 685 |
+
yield "data: [DONE]\n\n"
|
| 686 |
+
return
|
| 687 |
+
|
| 688 |
+
logger.info(f"[{request_id}] 开始流式传输 OpenAI 格式的响应。")
|
| 689 |
+
# 初始化token计数变量
|
| 690 |
+
actual_input_tokens = None
|
| 691 |
+
actual_output_tokens = None
|
| 692 |
+
actual_total_tokens = None
|
| 693 |
+
|
| 694 |
+
try:
|
| 695 |
+
for line in stream_response_obj.iter_lines():
|
| 696 |
+
if line:
|
| 697 |
+
decoded_line = line.decode('utf-8')
|
| 698 |
+
if decoded_line.startswith("data:"):
|
| 699 |
+
json_str = decoded_line[len("data:"):].strip()
|
| 700 |
+
if json_str == "[DONE]": # 这是 on-demand.io 的结束标记
|
| 701 |
+
break # 我们将在循环外发送 OpenAI 的 [DONE]
|
| 702 |
+
try:
|
| 703 |
+
event_data = json.loads(json_str)
|
| 704 |
+
event_type = event_data.get("eventType", "")
|
| 705 |
+
|
| 706 |
+
# 处理内容块
|
| 707 |
+
if event_type == "fulfillment":
|
| 708 |
+
content_chunk = event_data.get("answer", "")
|
| 709 |
+
if content_chunk is not None: # 确保 content_chunk 不是 None
|
| 710 |
+
full_assistant_reply_parts.append(content_chunk) # Aggregate
|
| 711 |
+
openai_chunk = {
|
| 712 |
+
"id": request_id,
|
| 713 |
+
"object": "chat.completion.chunk",
|
| 714 |
+
"created": int(time.time()),
|
| 715 |
+
"model": requested_model_name,
|
| 716 |
+
"choices": [
|
| 717 |
+
{
|
| 718 |
+
"delta": {"content": content_chunk},
|
| 719 |
+
"index": 0,
|
| 720 |
+
"finish_reason": None # 流式传输过程中 finish_reason 为 None
|
| 721 |
+
}
|
| 722 |
+
]
|
| 723 |
+
}
|
| 724 |
+
yield f"data: {json.dumps(openai_chunk, ensure_ascii=False)}\n\n"
|
| 725 |
+
|
| 726 |
+
# 从metrics事件中提取准确的token计数
|
| 727 |
+
elif event_type == "metricsLog":
|
| 728 |
+
public_metrics = event_data.get("publicMetrics", {})
|
| 729 |
+
if public_metrics:
|
| 730 |
+
# 确保获取到的token计数是整数,避免None值
|
| 731 |
+
actual_input_tokens = public_metrics.get("inputTokens", 0)
|
| 732 |
+
if actual_input_tokens is None:
|
| 733 |
+
actual_input_tokens = 0
|
| 734 |
+
|
| 735 |
+
actual_output_tokens = public_metrics.get("outputTokens", 0)
|
| 736 |
+
if actual_output_tokens is None:
|
| 737 |
+
actual_output_tokens = 0
|
| 738 |
+
|
| 739 |
+
actual_total_tokens = public_metrics.get("totalTokens", 0)
|
| 740 |
+
if actual_total_tokens is None:
|
| 741 |
+
actual_total_tokens = 0
|
| 742 |
+
|
| 743 |
+
logger.info(f"[{request_id}] 从metricsLog获取到准确的token计数: 输入={actual_input_tokens}, 输出={actual_output_tokens}, 总计={actual_total_tokens}")
|
| 744 |
+
|
| 745 |
+
except json.JSONDecodeError:
|
| 746 |
+
logger.warning(f"[{request_id}] 流式传输中 JSONDecodeError: {json_str}")
|
| 747 |
+
continue # 跳过无法解析的行
|
| 748 |
+
|
| 749 |
+
# 如果没有从metrics中获取到准确的token计数,则使用估算方法
|
| 750 |
+
if actual_input_tokens == 0 or actual_output_tokens == 0 or actual_total_tokens == 0:
|
| 751 |
+
logger.warning(f"[{request_id}] 未从metricsLog获取到有效的token计数,使用估算方法")
|
| 752 |
+
prompt_tokens, _, _ = count_message_tokens(messages, requested_model_name)
|
| 753 |
+
# 确保prompt_tokens不为None
|
| 754 |
+
if prompt_tokens is None:
|
| 755 |
+
prompt_tokens = 0
|
| 756 |
+
estimated_completion_tokens = max(1, prompt_tokens // 2) # 确保至少为1
|
| 757 |
+
estimated_total_tokens = prompt_tokens + estimated_completion_tokens
|
| 758 |
+
else:
|
| 759 |
+
# 使用从metrics中获取的准确token计数
|
| 760 |
+
prompt_tokens = actual_input_tokens
|
| 761 |
+
estimated_completion_tokens = actual_output_tokens
|
| 762 |
+
estimated_total_tokens = actual_total_tokens
|
| 763 |
+
|
| 764 |
+
# 循环结束后,发送 OpenAI 流的终止块
|
| 765 |
+
final_chunk = {
|
| 766 |
+
"id": request_id,
|
| 767 |
+
"object": "chat.completion.chunk",
|
| 768 |
+
"created": int(time.time()),
|
| 769 |
+
"model": requested_model_name,
|
| 770 |
+
"choices": [{"delta": {}, "index": 0, "finish_reason": "stop"}], # 标准的结束方式
|
| 771 |
+
"usage": { # 添加token统计信息
|
| 772 |
+
"prompt_tokens": prompt_tokens,
|
| 773 |
+
"completion_tokens": estimated_completion_tokens,
|
| 774 |
+
"total_tokens": estimated_total_tokens
|
| 775 |
+
}
|
| 776 |
+
}
|
| 777 |
+
yield f"data: {json.dumps(final_chunk, ensure_ascii=False)}\n\n"
|
| 778 |
+
yield "data: [DONE]\n\n" # OpenAI 流的最终结束标记
|
| 779 |
+
logger.info(f"[{request_id}] 完成 OpenAI 格式响应的流式传输。")
|
| 780 |
+
|
| 781 |
+
full_streamed_reply = "".join(full_assistant_reply_parts)
|
| 782 |
+
|
| 783 |
+
# 更新使用统计
|
| 784 |
+
request_duration_val = int((time.time() - request_start_time) * 1000)
|
| 785 |
+
final_prompt_tokens_for_stats = actual_input_tokens if actual_input_tokens is not None and actual_input_tokens > 0 else prompt_tokens
|
| 786 |
+
final_completion_tokens_for_stats = actual_output_tokens if actual_output_tokens is not None and actual_output_tokens > 0 else estimated_completion_tokens
|
| 787 |
+
final_total_tokens_for_stats = actual_total_tokens if actual_total_tokens is not None and actual_total_tokens > 0 else estimated_total_tokens
|
| 788 |
+
used_actual_for_history = actual_input_tokens is not None and actual_input_tokens > 0
|
| 789 |
+
|
| 790 |
+
_update_usage_statistics(
|
| 791 |
+
config_inst=config_instance,
|
| 792 |
+
request_id=request_id,
|
| 793 |
+
requested_model_name=requested_model_name,
|
| 794 |
+
account_email=ondemand_client.email,
|
| 795 |
+
is_success=True,
|
| 796 |
+
duration_ms=request_duration_val,
|
| 797 |
+
is_stream=True,
|
| 798 |
+
prompt_tokens_val=final_prompt_tokens_for_stats,
|
| 799 |
+
completion_tokens_val=final_completion_tokens_for_stats,
|
| 800 |
+
total_tokens_val=final_total_tokens_for_stats,
|
| 801 |
+
prompt_length=len(final_query_to_ondemand),
|
| 802 |
+
used_actual_tokens_for_history=used_actual_for_history
|
| 803 |
+
)
|
| 804 |
+
|
| 805 |
+
# 更新客户端的 active_context_hash 以反映对话进展
|
| 806 |
+
_update_client_context_hash_after_reply(
|
| 807 |
+
original_request_messages=captured_initial_request_messages,
|
| 808 |
+
assistant_reply_content=full_streamed_reply,
|
| 809 |
+
request_id=request_id,
|
| 810 |
+
user_identifier=token, # user_identifier is token
|
| 811 |
+
email_for_stats=ondemand_client.email, # <--- 使用 ondemand_client 当前的 email
|
| 812 |
+
current_ondemand_client_instance=ondemand_client,
|
| 813 |
+
config_inst=config_instance,
|
| 814 |
+
logger_instance=logger
|
| 815 |
+
)
|
| 816 |
+
except Exception as e: # 捕获流处理过程中的任何异常
|
| 817 |
+
logger.error(f"[{request_id}] 流式传输过程中发生错误: {e}")
|
| 818 |
+
# 在流错误的情况下,不更新 active_context_hash,因为它可能基于不完整的对话
|
| 819 |
+
# 计算token数量(仅提示部分,因为流式响应无法准确计算完成tokens)
|
| 820 |
+
prompt_tokens, _, _ = count_message_tokens(messages, requested_model_name)
|
| 821 |
+
# 确保prompt_tokens不为None
|
| 822 |
+
if prompt_tokens is None:
|
| 823 |
+
prompt_tokens = 0
|
| 824 |
+
# 错误情况下,完成tokens为0
|
| 825 |
+
estimated_completion_tokens = 0
|
| 826 |
+
# 错误情况下,总tokens等于提示tokens
|
| 827 |
+
estimated_total_tokens = prompt_tokens
|
| 828 |
+
|
| 829 |
+
error_json = { # 发送一个错误块
|
| 830 |
+
"id": request_id,
|
| 831 |
+
"object": "chat.completion.chunk",
|
| 832 |
+
"created": int(time.time()),
|
| 833 |
+
"model": requested_model_name,
|
| 834 |
+
"choices": [{"delta": {"content": f"[流处理异常: {str(e)}]"}, "index": 0, "finish_reason": "error"}],
|
| 835 |
+
"usage": { # 添加token统计信息
|
| 836 |
+
"prompt_tokens": prompt_tokens,
|
| 837 |
+
"completion_tokens": estimated_completion_tokens,
|
| 838 |
+
"total_tokens": estimated_total_tokens
|
| 839 |
+
}
|
| 840 |
+
}
|
| 841 |
+
yield f"data: {json.dumps(error_json, ensure_ascii=False)}\n\n"
|
| 842 |
+
yield "data: [DONE]\n\n"
|
| 843 |
+
|
| 844 |
+
# 更新使用统计 - 失败的流式请求
|
| 845 |
+
request_duration_val = int((time.time() - request_start_time) * 1000)
|
| 846 |
+
_update_usage_statistics(
|
| 847 |
+
config_inst=config_instance,
|
| 848 |
+
request_id=request_id,
|
| 849 |
+
requested_model_name=requested_model_name,
|
| 850 |
+
account_email=ondemand_client.email if ondemand_client else email_for_stats,
|
| 851 |
+
is_success=False,
|
| 852 |
+
duration_ms=request_duration_val,
|
| 853 |
+
is_stream=True,
|
| 854 |
+
prompt_tokens_val=prompt_tokens if prompt_tokens is not None else 0,
|
| 855 |
+
completion_tokens_val=0,
|
| 856 |
+
total_tokens_val=prompt_tokens if prompt_tokens is not None else 0,
|
| 857 |
+
error_message=str(e)
|
| 858 |
+
)
|
| 859 |
+
finally:
|
| 860 |
+
if stream_response_obj: # 确保关闭响应对象
|
| 861 |
+
stream_response_obj.close()
|
| 862 |
+
|
| 863 |
+
return Response(stream_with_context(generate_openai_stream(initial_messages_from_request)), content_type='text/event-stream; charset=utf-8')
|
| 864 |
+
else:
|
| 865 |
+
# 非流式响应
|
| 866 |
+
final_content = ondemand_result.get("content", "")
|
| 867 |
+
|
| 868 |
+
# 计算token数量
|
| 869 |
+
prompt_tokens, completion_tokens, total_tokens = count_message_tokens(messages, requested_model_name)
|
| 870 |
+
completion_tokens_actual = count_tokens(final_content, requested_model_name)
|
| 871 |
+
total_tokens_actual = prompt_tokens + completion_tokens_actual
|
| 872 |
+
|
| 873 |
+
openai_response = {
|
| 874 |
+
"id": request_id,
|
| 875 |
+
"object": "chat.completion",
|
| 876 |
+
"created": int(time.time()),
|
| 877 |
+
"model": requested_model_name,
|
| 878 |
+
"choices": [
|
| 879 |
+
{
|
| 880 |
+
"message": {
|
| 881 |
+
"role": "assistant",
|
| 882 |
+
"content": final_content
|
| 883 |
+
},
|
| 884 |
+
"finish_reason": "stop", # 假设成功完成则为 "stop"
|
| 885 |
+
"index": 0
|
| 886 |
+
}
|
| 887 |
+
],
|
| 888 |
+
"usage": { # 计算token数量
|
| 889 |
+
"prompt_tokens": prompt_tokens,
|
| 890 |
+
"completion_tokens": completion_tokens_actual,
|
| 891 |
+
"total_tokens": total_tokens_actual
|
| 892 |
+
}
|
| 893 |
+
}
|
| 894 |
+
logger.info(f"[{request_id}] 发送非流式 OpenAI 格式的响应。")
|
| 895 |
+
|
| 896 |
+
# 更新使用统计 - 非流式成功请求
|
| 897 |
+
request_duration_val = int((time.time() - request_start_time) * 1000)
|
| 898 |
+
_update_usage_statistics(
|
| 899 |
+
config_inst=config_instance,
|
| 900 |
+
request_id=request_id,
|
| 901 |
+
requested_model_name=requested_model_name,
|
| 902 |
+
account_email=ondemand_client.email,
|
| 903 |
+
is_success=True,
|
| 904 |
+
duration_ms=request_duration_val,
|
| 905 |
+
is_stream=False,
|
| 906 |
+
prompt_tokens_val=prompt_tokens,
|
| 907 |
+
completion_tokens_val=completion_tokens_actual,
|
| 908 |
+
total_tokens_val=total_tokens_actual,
|
| 909 |
+
prompt_length=len(final_query_to_ondemand),
|
| 910 |
+
completion_length=len(final_content) if final_content else 0,
|
| 911 |
+
used_actual_tokens_for_history=True
|
| 912 |
+
)
|
| 913 |
+
|
| 914 |
+
# 更新客户端的 active_context_hash 以反映对话进展
|
| 915 |
+
_update_client_context_hash_after_reply(
|
| 916 |
+
original_request_messages=initial_messages_from_request,
|
| 917 |
+
assistant_reply_content=final_content,
|
| 918 |
+
request_id=request_id,
|
| 919 |
+
user_identifier=token, # user_identifier is token
|
| 920 |
+
email_for_stats=ondemand_client.email, # <--- 使用 ondemand_client 当前的 email
|
| 921 |
+
current_ondemand_client_instance=ondemand_client,
|
| 922 |
+
config_inst=config_instance,
|
| 923 |
+
logger_instance=logger
|
| 924 |
+
)
|
| 925 |
+
|
| 926 |
+
return openai_response
|
| 927 |
+
|
| 928 |
+
@app.route('/', methods=['GET'])
def show_stats():
    """Render the usage-statistics dashboard.

    Snapshots counters, latency, token usage and estimated cost from
    ``config_instance.usage_stats`` while holding its lock, then renders
    the ``stats.html`` template with the aggregated ``stats`` dict.
    """
    current_time = datetime.now()
    current_time_str = current_time.strftime('%Y-%m-%d %H:%M:%S')
    current_date = current_time.strftime('%Y-%m-%d')

    with config_instance.usage_stats_lock:
        # Snapshot the scalar counters.
        total_requests = config_instance.usage_stats["total_requests"]
        successful_requests = config_instance.usage_stats["successful_requests"]
        failed_requests = config_instance.usage_stats["failed_requests"]
        total_prompt_tokens = config_instance.usage_stats["total_prompt_tokens"]
        total_completion_tokens = config_instance.usage_stats["total_completion_tokens"]
        total_tokens = config_instance.usage_stats["total_tokens"]

        # Success rate as an integer percentage.
        success_rate = int((successful_requests / total_requests * 100) if total_requests > 0 else 0)

        # Average response time over successful requests.
        # NOTE(review): the numerator only covers requests still present in
        # request_history, while the denominator is the all-time success
        # count — confirm this approximation is intended.
        successful_history = [req for req in config_instance.usage_stats["request_history"] if req.get('success', False)]
        total_duration = sum(req.get('duration_ms', 0) for req in successful_history)
        avg_duration = (total_duration / successful_requests) if successful_requests > 0 else 0

        # Fastest successful response. Only consider entries that actually
        # recorded 'duration_ms' so float('inf') can never leak into the page
        # (the previous code used inf as the per-entry default).
        known_durations = [req['duration_ms'] for req in successful_history if 'duration_ms' in req]
        min_duration = min(known_durations) if known_durations else 0

        # Today's request count and its growth relative to all earlier days.
        today_requests = config_instance.usage_stats["daily_usage"].get(current_date, 0)
        if total_requests is None or today_requests is None:
            growth_rate = 0
        elif (total_requests - today_requests) <= 0:
            # Every recorded request happened today (the equality case is
            # already covered by the <= 0 comparison).
            growth_rate = 100
        else:
            growth_rate = (today_requests / (total_requests - today_requests) * 100)

        # Estimated cost, using per-model prices from the configuration.
        # These lookups are loop-invariant, so fetch them once up front.
        all_model_prices = config_instance.get('model_prices', {})
        default_model_price = config_instance.get('default_model_price', {'input': 0.50 / 1000000, 'output': 2.00 / 1000000})

        total_cost = 0.0
        model_costs = {}  # per-model accumulated cost

        for req in successful_history:
            model_name = req.get('model', '')
            model_price = all_model_prices.get(model_name, default_model_price)

            input_tokens = req.get('prompt_tokens', 0)

            # Prefer the exact completion count when it was recorded;
            # otherwise fall back to the stored estimate.
            if 'completion_tokens' in req:
                output_tokens = req.get('completion_tokens', 0)
            else:
                output_tokens = req.get('estimated_completion_tokens', 0)

            request_cost = (input_tokens * model_price['input']) + (output_tokens * model_price['output'])
            total_cost += request_cost
            model_costs[model_name] = model_costs.get(model_name, 0) + request_cost

        avg_cost = (total_cost / successful_requests) if successful_requests > 0 else 0

        # Most frequently used model.
        model_usage = dict(config_instance.usage_stats["model_usage"])
        top_models = sorted(model_usage.items(), key=lambda x: x[1], reverse=True)
        top_model = top_models[0] if top_models else None

        # Assemble the full payload consumed by stats.html.
        stats = {
            "total_requests": total_requests,
            "successful_requests": successful_requests,
            "failed_requests": failed_requests,
            "success_rate": success_rate,
            "avg_duration": avg_duration,
            "min_duration": min_duration,
            "today_requests": today_requests,
            "growth_rate": growth_rate,
            "total_prompt_tokens": total_prompt_tokens,
            "total_completion_tokens": total_completion_tokens,
            "total_tokens": total_tokens,
            "total_cost": total_cost,
            "avg_cost": avg_cost,
            "model_usage": model_usage,
            "model_costs": model_costs,  # per-model cost breakdown
            "top_model": top_model,
            "model_tokens": dict(config_instance.usage_stats["model_tokens"]),
            "account_usage": dict(config_instance.usage_stats["account_usage"]),
            "daily_usage": dict(sorted(config_instance.usage_stats["daily_usage"].items(), reverse=True)[:30]),  # last 30 days
            "hourly_usage": dict(sorted(config_instance.usage_stats["hourly_usage"].items(), reverse=True)[:48]),  # last 48 hours
            "request_history": list(config_instance.usage_stats["request_history"][:50]),
            "daily_tokens": dict(sorted(config_instance.usage_stats["daily_tokens"].items(), reverse=True)[:30]),  # last 30 days
            "hourly_tokens": dict(sorted(config_instance.usage_stats["hourly_tokens"].items(), reverse=True)[:48]),  # last 48 hours
            "last_saved": config_instance.usage_stats.get("last_saved", "从未保存")
        }

    return render_template('stats.html', stats=stats, current_time=current_time_str)
|
| 1033 |
+
|
| 1034 |
+
@app.route('/save_stats', methods=['POST'])
def save_stats():
    """Force an immediate save of the in-memory usage statistics.

    On success the client is redirected back to the stats dashboard;
    on failure a JSON error payload is returned with HTTP 500.
    """
    try:
        config_instance.save_stats_to_file()
        logger.info("统计数据已手动保存")
        response = redirect(url_for('show_stats'))
    except Exception as e:
        logger.error(f"手动保存统计数据时出错: {e}")
        return jsonify({"status": "error", "message": str(e)}), 500
    return response
|
static/css/styles.css
ADDED
|
@@ -0,0 +1,698 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
:root {
|
| 2 |
+
--primary-color: #3498db;
|
| 3 |
+
--secondary-color: #2c3e50;
|
| 4 |
+
--success-color: #27ae60;
|
| 5 |
+
--info-color: #3498db;
|
| 6 |
+
--warning-color: #f39c12;
|
| 7 |
+
--danger-color: #e74c3c;
|
| 8 |
+
--light-bg: #f5f5f5;
|
| 9 |
+
--card-bg: #f8f9fa;
|
| 10 |
+
--border-color: #ddd;
|
| 11 |
+
--shadow-color: rgba(0,0,0,0.1);
|
| 12 |
+
--text-color: #333;
|
| 13 |
+
--heading-color: #2c3e50;
|
| 14 |
+
--button-hover: #2980b9;
|
| 15 |
+
--save-button: #e67e22;
|
| 16 |
+
--save-button-hover: #d35400;
|
| 17 |
+
--refresh-button: #2ecc71;
|
| 18 |
+
--refresh-button-hover: #27ae60;
|
| 19 |
+
--chart-bg: #fff;
|
| 20 |
+
--table-header-bg: #3498db;
|
| 21 |
+
--table-row-hover: #f5f5f5;
|
| 22 |
+
--table-border: #ddd;
|
| 23 |
+
--success-text: #27ae60;
|
| 24 |
+
--fail-text: #e74c3c;
|
| 25 |
+
--header-height: 60px;
|
| 26 |
+
--footer-height: 60px;
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
/* 暗黑模式变量 */
|
| 30 |
+
body.dark-mode {
|
| 31 |
+
--primary-color: #2980b9;
|
| 32 |
+
--secondary-color: #34495e;
|
| 33 |
+
--light-bg: #1a1a1a;
|
| 34 |
+
--card-bg: #2c2c2c;
|
| 35 |
+
--border-color: #444;
|
| 36 |
+
--shadow-color: rgba(0,0,0,0.3);
|
| 37 |
+
--text-color: #f5f5f5;
|
| 38 |
+
--heading-color: #f5f5f5;
|
| 39 |
+
--button-hover: #3498db;
|
| 40 |
+
--chart-bg: #2c2c2c;
|
| 41 |
+
--table-header-bg: #2980b9;
|
| 42 |
+
--table-row-hover: #3a3a3a;
|
| 43 |
+
--table-border: #444;
|
| 44 |
+
--save-button: #d35400;
|
| 45 |
+
--save-button-hover: #e67e22;
|
| 46 |
+
--refresh-button: #27ae60;
|
| 47 |
+
--refresh-button-hover: #2ecc71;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
* {
|
| 51 |
+
box-sizing: border-box;
|
| 52 |
+
margin: 0;
|
| 53 |
+
padding: 0;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
body {
|
| 57 |
+
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 58 |
+
margin: 0;
|
| 59 |
+
padding: 0;
|
| 60 |
+
background-color: var(--light-bg);
|
| 61 |
+
color: var(--text-color);
|
| 62 |
+
line-height: 1.6;
|
| 63 |
+
transition: background-color 0.3s ease, color 0.3s ease;
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
body.dark-mode {
|
| 67 |
+
background-color: var(--light-bg);
|
| 68 |
+
color: var(--text-color);
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
/* 主布局结构 */
|
| 72 |
+
.dashboard-wrapper {
|
| 73 |
+
display: flex;
|
| 74 |
+
min-height: 100vh;
|
| 75 |
+
position: relative;
|
| 76 |
+
flex-direction: column;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
/* 主内容区域 */
|
| 80 |
+
.main-content {
|
| 81 |
+
flex: 1;
|
| 82 |
+
min-height: 100vh;
|
| 83 |
+
display: flex;
|
| 84 |
+
flex-direction: column;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/* 主内容头部 */
|
| 88 |
+
.main-header {
|
| 89 |
+
background-color: var(--card-bg);
|
| 90 |
+
padding: 1rem 1.5rem;
|
| 91 |
+
box-shadow: 0 2px 5px var(--shadow-color);
|
| 92 |
+
display: flex;
|
| 93 |
+
justify-content: space-between;
|
| 94 |
+
align-items: center;
|
| 95 |
+
position: sticky;
|
| 96 |
+
top: 0;
|
| 97 |
+
z-index: 90;
|
| 98 |
+
height: var(--header-height);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
.header-left {
|
| 102 |
+
display: flex;
|
| 103 |
+
align-items: center;
|
| 104 |
+
gap: 1rem;
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
.header-left h1 {
|
| 108 |
+
font-size: 1.8rem;
|
| 109 |
+
margin: 0;
|
| 110 |
+
color: var(--primary-color);
|
| 111 |
+
display: flex;
|
| 112 |
+
align-items: center;
|
| 113 |
+
gap: 0.5rem;
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
.header-right {
|
| 117 |
+
display: flex;
|
| 118 |
+
align-items: center;
|
| 119 |
+
gap: 1.5rem;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
/* 自动刷新进度条 */
|
| 123 |
+
.auto-refresh-bar {
|
| 124 |
+
background-color: var(--card-bg);
|
| 125 |
+
padding: 0.5rem 1rem;
|
| 126 |
+
margin-bottom: 1rem;
|
| 127 |
+
border-radius: 4px;
|
| 128 |
+
box-shadow: 0 1px 3px var(--shadow-color);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
.refresh-progress {
|
| 132 |
+
height: 4px;
|
| 133 |
+
background-color: rgba(0,0,0,0.1);
|
| 134 |
+
border-radius: 2px;
|
| 135 |
+
margin-bottom: 0.5rem;
|
| 136 |
+
overflow: hidden;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
.progress-bar {
|
| 140 |
+
height: 100%;
|
| 141 |
+
background-color: var(--primary-color);
|
| 142 |
+
width: 0;
|
| 143 |
+
transition: width 1s linear;
|
| 144 |
+
border-radius: 2px;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
.refresh-info {
|
| 148 |
+
display: flex;
|
| 149 |
+
justify-content: space-between;
|
| 150 |
+
align-items: center;
|
| 151 |
+
font-size: 0.85rem;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
h1, h2, h3 {
|
| 155 |
+
color: var(--heading-color);
|
| 156 |
+
margin-bottom: 1rem;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
/* 仪表盘部分 */
|
| 160 |
+
.dashboard-section {
|
| 161 |
+
padding: 1rem 1.5rem;
|
| 162 |
+
display: none;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
.dashboard-section.active-section {
|
| 166 |
+
display: block;
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
.section-header {
|
| 170 |
+
display: flex;
|
| 171 |
+
justify-content: space-between;
|
| 172 |
+
align-items: center;
|
| 173 |
+
margin-bottom: 1.5rem;
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
.section-header h2 {
|
| 177 |
+
font-size: 1.5rem;
|
| 178 |
+
margin: 0;
|
| 179 |
+
display: flex;
|
| 180 |
+
align-items: center;
|
| 181 |
+
gap: 0.5rem;
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
.section-header h2 i {
|
| 185 |
+
color: var(--primary-color);
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
.time-info {
|
| 189 |
+
font-size: 0.9rem;
|
| 190 |
+
color: var(--text-color);
|
| 191 |
+
opacity: 0.8;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
.time-info span {
|
| 195 |
+
margin-right: 1rem;
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
.time-info i {
|
| 199 |
+
margin-right: 0.5rem;
|
| 200 |
+
color: var(--primary-color);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
.actions {
|
| 204 |
+
display: flex;
|
| 205 |
+
gap: 0.5rem;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
.save-button, .refresh-button {
|
| 209 |
+
background-color: var(--save-button);
|
| 210 |
+
color: white;
|
| 211 |
+
border: none;
|
| 212 |
+
padding: 0.5rem 1rem;
|
| 213 |
+
border-radius: 4px;
|
| 214 |
+
cursor: pointer;
|
| 215 |
+
font-weight: 600;
|
| 216 |
+
transition: all 0.3s ease;
|
| 217 |
+
display: flex;
|
| 218 |
+
align-items: center;
|
| 219 |
+
gap: 0.5rem;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
.save-button:hover {
|
| 223 |
+
background-color: var(--save-button-hover);
|
| 224 |
+
transform: translateY(-2px);
|
| 225 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.1);
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
.refresh-button {
|
| 229 |
+
background-color: var(--refresh-button);
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
.refresh-button:hover {
|
| 233 |
+
background-color: var(--refresh-button-hover);
|
| 234 |
+
transform: translateY(-2px);
|
| 235 |
+
box-shadow: 0 4px 8px rgba(0,0,0,0.1);
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
/* 统计卡片网格 */
|
| 239 |
+
.stats-overview {
|
| 240 |
+
display: grid;
|
| 241 |
+
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
|
| 242 |
+
gap: 1.5rem;
|
| 243 |
+
margin-bottom: 2rem;
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
/* 统计卡片样式 */
|
| 247 |
+
.stats-card {
|
| 248 |
+
background-color: var(--card-bg);
|
| 249 |
+
border-radius: 10px;
|
| 250 |
+
padding: 1.5rem;
|
| 251 |
+
box-shadow: 0 2px 5px var(--shadow-color);
|
| 252 |
+
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
| 253 |
+
border-top: 4px solid var(--primary-color);
|
| 254 |
+
position: relative;
|
| 255 |
+
overflow: hidden;
|
| 256 |
+
display: flex;
|
| 257 |
+
align-items: center;
|
| 258 |
+
gap: 1rem;
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
.stats-card.primary {
|
| 262 |
+
border-top-color: var(--primary-color);
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
/* Stat-card accent colors: each variant tints the card's top border. */
.stats-card.success {
    border-top-color: var(--success-color);
}

.stats-card.info {
    border-top-color: var(--info-color);
}

.stats-card.warning {
    border-top-color: var(--warning-color);
}

.stats-card.danger {
    border-top-color: var(--danger-color);
}

.stats-card.secondary {
    border-top-color: var(--secondary-color);
}

/* Circular icon badge on each stat card (defaults to the primary tint). */
.stats-icon {
    width: 50px;
    height: 50px;
    border-radius: 50%;
    background-color: rgba(52, 152, 219, 0.1);
    display: flex;
    align-items: center;
    justify-content: center;
    font-size: 1.5rem;
    color: var(--primary-color);
}

/* Per-variant icon tints: translucent background + solid accent color. */
.stats-card.primary .stats-icon {
    background-color: rgba(52, 152, 219, 0.1);
    color: var(--primary-color);
}

.stats-card.success .stats-icon {
    background-color: rgba(39, 174, 96, 0.1);
    color: var(--success-color);
}

.stats-card.info .stats-icon {
    background-color: rgba(52, 152, 219, 0.1);
    color: var(--info-color);
}

.stats-card.warning .stats-icon {
    background-color: rgba(243, 156, 18, 0.1);
    color: var(--warning-color);
}

.stats-card.danger .stats-icon {
    background-color: rgba(231, 76, 60, 0.1);
    color: var(--danger-color);
}

.stats-card.secondary .stats-icon {
    background-color: rgba(44, 62, 80, 0.1);
    color: var(--secondary-color);
}

/* Text area of a stat card fills the remaining row width. */
.stats-content {
    flex: 1;
}

/* Subtle decorative bar in the bottom-right corner of each card. */
.stats-card::after {
    content: '';
    position: absolute;
    bottom: 0;
    right: 0;
    width: 30%;
    height: 4px;
    background-color: var(--primary-color);
    opacity: 0.3;
}

/* Lift the card slightly on hover. */
.stats-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 5px 15px var(--shadow-color);
}

/* Card title (dimmed relative to the headline number). */
.stats-card h3 {
    font-size: 1rem;
    color: var(--text-color);
    opacity: 0.8;
    margin-bottom: 0.5rem;
}

/* Large headline figure on a stat card. */
.stats-number {
    font-size: 2rem;
    font-weight: bold;
    color: var(--primary-color);
    margin: 0.5rem 0;
    display: flex;
    align-items: center;
}
|
| 362 |
+
|
| 363 |
+
/* Chart layout */
.dashboard-charts {
    margin-top: 2rem;
}

/* Two charts per row on wide screens (collapses in the media queries below). */
.chart-row {
    display: grid;
    grid-template-columns: 1fr 1fr;
    gap: 1.5rem;
    margin-bottom: 1.5rem;
}

.chart-card {
    background-color: var(--card-bg);
    border-radius: 10px;
    box-shadow: 0 2px 5px var(--shadow-color);
    overflow: hidden;
}

.chart-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 1rem 1.5rem;
    border-bottom: 1px solid var(--border-color);
}

.chart-header h3 {
    margin: 0;
    font-size: 1.1rem;
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

.chart-header h3 i {
    color: var(--primary-color);
}

/* Fixed-height canvas container; Chart.js fills it responsively. */
.chart-body {
    padding: 1rem;
    height: 300px;
}

/* Table styles */
.table-container {
    max-height: 500px;
    overflow-y: auto;
    border-radius: 10px;
    box-shadow: 0 2px 5px var(--shadow-color);
    margin-bottom: 1rem;
}

table {
    width: 100%;
    border-collapse: collapse;
    margin-top: 1rem;
    background-color: var(--card-bg);
    border-radius: 10px;
    overflow: hidden;
    box-shadow: 0 2px 5px var(--shadow-color);
}

th, td {
    padding: 1rem;
    text-align: left;
    border-bottom: 1px solid var(--table-border);
}

/* Header row stays visible while .table-container scrolls. */
th {
    background-color: var(--table-header-bg);
    color: white;
    font-weight: 600;
    position: sticky;
    top: 0;
    z-index: 10;
}

/* Sortable column headers (click handling lives in scripts.js). */
th[data-sort] {
    cursor: pointer;
}

th[data-sort] i {
    margin-left: 0.5rem;
    font-size: 0.8rem;
}

/* Active sort indicator (asc/desc classes toggled by scripts.js). */
th.asc i, th.desc i {
    color: #fff;
}

tr:last-child td {
    border-bottom: none;
}

tr:hover {
    background-color: var(--table-row-hover);
}

/* Request-outcome cells. */
td.success {
    color: var(--success-text);
    font-weight: 600;
}

td.fail {
    color: var(--fail-text);
    font-weight: 600;
}
|
| 471 |
+
|
| 472 |
+
/* Request-history section */
.history-section {
    margin-top: 2rem;
}

/* Toolbar above the history table (search box + actions). */
.history-actions {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 1rem;
    flex-wrap: wrap;
    gap: 1rem;
}

.search-box {
    position: relative;
    flex: 1;
    min-width: 200px;
}

/* Left padding leaves room for the magnifier icon positioned below. */
.search-box input {
    width: 100%;
    padding: 0.5rem 1rem 0.5rem 2.5rem;
    border: 1px solid var(--border-color);
    border-radius: 4px;
    font-size: 1rem;
    background-color: var(--card-bg);
    color: var(--text-color);
}

.search-box i {
    position: absolute;
    left: 0.8rem;
    top: 50%;
    transform: translateY(-50%);
    color: var(--primary-color);
}

/* Pager controls under the table. */
.pagination {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-top: 1rem;
}

.pagination button {
    background-color: var(--primary-color);
    color: white;
    border: none;
    padding: 0.5rem 1rem;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s ease;
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

.pagination button:disabled {
    background-color: #ccc;
    cursor: not-allowed;
}

.pagination button:not(:disabled):hover {
    background-color: var(--button-hover);
}

#page-info {
    font-size: 0.9rem;
    color: var(--text-color);
}

/* Footer styles */
.main-footer {
    margin-top: auto;
    padding: 1rem 1.5rem;
    border-top: 1px solid var(--border-color);
    background-color: var(--card-bg);
    box-shadow: 0 -2px 5px var(--shadow-color);
}

.footer-content {
    display: flex;
    justify-content: space-between;
    align-items: center;
}

.footer-logo h3 {
    margin: 0;
    font-size: 1.2rem;
    color: var(--primary-color);
}

.footer-logo h3 span {
    font-weight: normal;
    opacity: 0.8;
}

.footer-info {
    font-size: 0.85rem;
    opacity: 0.8;
}

/* Auto-refresh countdown number (updated every second by scripts.js). */
#countdown {
    font-weight: bold;
    color: var(--primary-color);
}

/* Status badge styles */
.status-badge {
    display: inline-flex;
    align-items: center;
    gap: 0.3rem;
    padding: 0.3rem 0.6rem;
    border-radius: 20px;
    font-size: 0.85rem;
    font-weight: 600;
}

.status-badge.success {
    background-color: rgba(39, 174, 96, 0.1);
    color: var(--success-color);
}

.status-badge.fail {
    background-color: rgba(231, 76, 60, 0.1);
    color: var(--fail-text);
}

/* Model badge styles */
.model-badge {
    display: inline-block;
    padding: 0.3rem 0.6rem;
    border-radius: 20px;
    font-size: 0.85rem;
    background-color: rgba(52, 152, 219, 0.1);
    color: var(--primary-color);
}

.model-badge.small {
    font-size: 0.75rem;
    padding: 0.2rem 0.4rem;
}

/* Account avatar styles */
.account-avatar {
    display: inline-flex;
    align-items: center;
    justify-content: center;
    width: 30px;
    height: 30px;
    border-radius: 50%;
    background-color: var(--primary-color);
    color: white;
    font-weight: bold;
}

.account-avatar.small {
    width: 24px;
    height: 24px;
    font-size: 0.8rem;
}

/* Avatar + account name laid out side by side in a table cell. */
.account-cell {
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

/* Trend indicator (arrow + percentage under a stat number). */
.stats-trend {
    display: flex;
    align-items: center;
    gap: 0.3rem;
    font-size: 0.85rem;
    margin-top: 0.5rem;
}

.stats-trend.positive {
    color: var(--success-color);
}

.stats-trend.negative {
    color: var(--danger-color);
}

/* Secondary detail line under a stat number. */
.stats-detail {
    font-size: 0.85rem;
    opacity: 0.8;
    margin-top: 0.5rem;
}
|
| 662 |
+
|
| 663 |
+
/* Responsive design tweaks */

/* Tablets: single chart per row, stat cards wrap to fit. */
@media (max-width: 992px) {
    .chart-row {
        grid-template-columns: 1fr;
    }

    .stats-overview {
        grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    }
}

/* Phones: stack everything in one column and shrink the charts. */
@media (max-width: 768px) {
    .stats-overview {
        grid-template-columns: 1fr;
    }

    .chart-body {
        height: 250px;
    }

    /* Let wide tables scroll horizontally instead of overflowing. */
    table {
        display: block;
        overflow-x: auto;
    }

    .history-actions {
        flex-direction: column;
        align-items: stretch;
    }

    .footer-content {
        flex-direction: column;
        gap: 1rem;
        text-align: center;
    }
}
|
static/js/scripts.js
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Global state shared by the dashboard scripts.
let refreshInterval = 60; // auto-refresh period, in seconds
let autoRefreshEnabled = true; // auto refresh is on by default
let chartInstances = {}; // Chart.js instances keyed by canvas id (used by the theme switcher)
let darkModeEnabled = localStorage.getItem('theme') === 'dark'; // theme preference persisted in localStorage
|
| 6 |
+
|
| 7 |
+
// Compact, human-readable formatting for large chart values
// (e.g. 1500 -> "1.5K", 2500000 -> "2.5M", 3200000000 -> "3.2G").
// Values below 1000 are returned unchanged, as a number.
function formatChartNumber(value) {
    const scales = [
        [1000000000, 'G'],
        [1000000, 'M'],
        [1000, 'K'],
    ];
    for (const [threshold, suffix] of scales) {
        if (value >= threshold) {
            return (value / threshold).toFixed(1) + suffix;
        }
    }
    return value;
}
|
| 18 |
+
|
| 19 |
+
// Wire up all dashboard behaviour once the DOM is ready.
document.addEventListener('DOMContentLoaded', function() {
    // Render the Chart.js charts from data-* attributes.
    initializeCharts();

    // Start the auto-refresh countdown / progress bar.
    setupAutoRefresh();

    // Theme toggle button (if present on the page).
    setupThemeToggle();

    // Apply the theme persisted in localStorage.
    loadSavedTheme();

    // Row-hover highlighting for the history table.
    enhanceTableInteraction();

    // Click feedback on the "save stats" button.
    setupSaveStatsButton();

    // Refresh the copyright year in the footer.
    updateFooterInfo();

    // Table sorting, filtering and pagination.
    const table = document.getElementById('history-table');
    if (table) {
        const headers = table.querySelectorAll('th[data-sort]');
        const rows = Array.from(table.querySelectorAll('tbody tr'));
        const rowsPerPage = 10;
        let currentPage = 1;
        let filteredRows = [...rows];

        // Parse a display value such as "1,234", "1.5K", "2.3M" or "4G"
        // back into a plain number so numeric columns sort correctly.
        // BUGFIX: the previous code stripped the K/M/G suffix without
        // scaling (and parseInt truncated decimals), so e.g. "9.0K"
        // sorted above "1.2M".
        function parseDisplayNumber(text) {
            const cleaned = text.replace(/,/g, '').trim();
            if (cleaned === '-' || cleaned === '') return 0;
            const match = cleaned.match(/^(\d+(?:\.\d+)?)\s*([KMG])?/);
            if (!match) return 0;
            let value = parseFloat(match[1]);
            if (match[2] === 'K') value *= 1e3;
            else if (match[2] === 'M') value *= 1e6;
            else if (match[2] === 'G') value *= 1e9;
            return value;
        }

        // Show the rows belonging to the current page and sync the pager UI.
        function initPagination() {
            const totalPages = Math.ceil(filteredRows.length / rowsPerPage);
            document.getElementById('total-pages').textContent = totalPages;
            document.getElementById('current-page').textContent = currentPage;
            document.getElementById('prev-page').disabled = currentPage === 1;
            document.getElementById('next-page').disabled = currentPage === totalPages || totalPages === 0;

            const startIndex = (currentPage - 1) * rowsPerPage;
            const endIndex = startIndex + rowsPerPage;

            // Hide everything, then reveal just the current slice.
            rows.forEach(row => row.style.display = 'none');
            filteredRows.slice(startIndex, endIndex).forEach(row => row.style.display = '');
        }

        // Column sorting.
        headers.forEach(header => {
            header.addEventListener('click', () => {
                const sortBy = header.getAttribute('data-sort');
                const isAscending = header.classList.contains('asc');

                // Clear all sort indicators.
                headers.forEach(h => {
                    h.classList.remove('asc', 'desc');
                    h.querySelector('i').className = 'fas fa-sort';
                });

                // Toggle direction on the clicked column.
                if (isAscending) {
                    header.classList.add('desc');
                    header.querySelector('i').className = 'fas fa-sort-down';
                } else {
                    header.classList.add('asc');
                    header.querySelector('i').className = 'fas fa-sort-up';
                }

                // Sort the filtered rows by the selected column.
                // Cell indices assume the template's column order:
                // 0=id, 1=timestamp, 2=model, 3=account, 4=status,
                // 5=duration, 6=total tokens — TODO confirm against stats.html.
                filteredRows.sort((a, b) => {
                    let aValue, bValue;

                    if (sortBy === 'id') {
                        aValue = a.cells[0].getAttribute('title');
                        bValue = b.cells[0].getAttribute('title');
                    } else if (sortBy === 'timestamp') {
                        aValue = a.cells[1].textContent;
                        bValue = b.cells[1].textContent;
                    } else if (sortBy === 'duration' || sortBy === 'total') {
                        const cellIndex = sortBy === 'duration' ? 5 : 6;
                        aValue = parseDisplayNumber(a.cells[cellIndex].textContent);
                        bValue = parseDisplayNumber(b.cells[cellIndex].textContent);
                    } else {
                        const cellIndex = sortBy === 'model' ? 2 : (sortBy === 'account' ? 3 : 4);
                        aValue = a.cells[cellIndex].textContent;
                        bValue = b.cells[cellIndex].textContent;
                    }

                    if (aValue < bValue) return isAscending ? -1 : 1;
                    if (aValue > bValue) return isAscending ? 1 : -1;
                    return 0;
                });

                // Re-render from page one.
                currentPage = 1;
                initPagination();
            });
        });

        // Free-text filtering over every cell of a row.
        const searchInput = document.getElementById('history-search');
        if (searchInput) {
            searchInput.addEventListener('input', function() {
                const searchTerm = this.value.toLowerCase();

                filteredRows = rows.filter(row => {
                    const rowText = Array.from(row.cells).map(cell => cell.textContent.toLowerCase()).join(' ');
                    return rowText.includes(searchTerm);
                });

                currentPage = 1;
                initPagination();
            });
        }

        // Pager buttons.
        const prevPageBtn = document.getElementById('prev-page');
        const nextPageBtn = document.getElementById('next-page');

        if (prevPageBtn) {
            prevPageBtn.addEventListener('click', () => {
                if (currentPage > 1) {
                    currentPage--;
                    initPagination();
                }
            });
        }

        if (nextPageBtn) {
            nextPageBtn.addEventListener('click', () => {
                const totalPages = Math.ceil(filteredRows.length / rowsPerPage);
                if (currentPage < totalPages) {
                    currentPage++;
                    initPagination();
                }
            });
        }

        // Initial render.
        initPagination();
    }

    // Manual refresh button reloads the page.
    const refreshBtn = document.getElementById('refresh-btn');
    if (refreshBtn) {
        refreshBtn.addEventListener('click', () => {
            location.reload();
        });
    }
});
|
| 170 |
+
|
| 171 |
+
// Build the dashboard charts from JSON stored in data-* attributes on
// the canvas elements. Instances are kept in the global `chartInstances`
// map so updateChartsTheme() can recolor them later.
function initializeCharts() {
    try {
        // Register the datalabels plugin globally.
        Chart.register(ChartDataLabels);

        // Global Chart.js defaults: dashboard font + theme-aware text color.
        Chart.defaults.font.family = 'Nunito, sans-serif';
        Chart.defaults.color = getComputedStyle(document.documentElement).getPropertyValue('--text-color');

        // Daily request trend (filled line chart).
        const dailyChartElement = document.getElementById('dailyChart');
        if (dailyChartElement) {
            // Labels/values are JSON-encoded by the server-side template.
            const labels = JSON.parse(dailyChartElement.dataset.labels || '[]');
            const values = JSON.parse(dailyChartElement.dataset.values || '[]');

            const dailyChart = new Chart(dailyChartElement, {
                type: 'line',
                data: {
                    labels: labels,
                    datasets: [{
                        label: '请求数',
                        data: values,
                        backgroundColor: 'rgba(52, 152, 219, 0.2)',
                        borderColor: 'rgba(52, 152, 219, 1)',
                        borderWidth: 2,
                        pointBackgroundColor: 'rgba(52, 152, 219, 1)',
                        pointRadius: 4,
                        tension: 0.3,
                        fill: true
                    }]
                },
                options: {
                    responsive: true,
                    maintainAspectRatio: false, // fill the fixed-height .chart-body
                    plugins: {
                        legend: {
                            display: false
                        },
                        tooltip: {
                            mode: 'index',
                            intersect: false,
                            backgroundColor: 'rgba(0, 0, 0, 0.7)',
                            titleFont: {
                                size: 14
                            },
                            bodyFont: {
                                size: 13
                            },
                            padding: 10,
                            displayColors: false
                        },
                        // Per-point labels are disabled for the line chart.
                        datalabels: {
                            display: false
                        }
                    },
                    scales: {
                        x: {
                            grid: {
                                display: false
                            },
                            ticks: {
                                maxRotation: 45,
                                minRotation: 45
                            }
                        },
                        y: {
                            beginAtZero: true,
                            grid: {
                                color: 'rgba(200, 200, 200, 0.1)'
                            },
                            ticks: {
                                precision: 0 // integer request counts only
                            }
                        }
                    }
                }
            });

            chartInstances['dailyChart'] = dailyChart;
        }

        // Model usage distribution (pie chart).
        const modelChartElement = document.getElementById('modelChart');
        if (modelChartElement) {
            const labels = JSON.parse(modelChartElement.dataset.labels || '[]');
            const values = JSON.parse(modelChartElement.dataset.values || '[]');

            const modelChart = new Chart(modelChartElement, {
                type: 'pie',
                data: {
                    labels: labels,
                    datasets: [{
                        label: '模型使用次数',
                        // Fixed 10-color palette; slices beyond 10 reuse colors.
                        data: values,
                        backgroundColor: [
                            'rgba(255, 99, 132, 0.5)',
                            'rgba(54, 162, 235, 0.5)',
                            'rgba(255, 206, 86, 0.5)',
                            'rgba(75, 192, 192, 0.5)',
                            'rgba(153, 102, 255, 0.5)',
                            'rgba(255, 159, 64, 0.5)',
                            'rgba(199, 199, 199, 0.5)',
                            'rgba(83, 102, 255, 0.5)',
                            'rgba(40, 159, 64, 0.5)',
                            'rgba(210, 199, 199, 0.5)'
                        ],
                        borderColor: [
                            'rgba(255, 99, 132, 1)',
                            'rgba(54, 162, 235, 1)',
                            'rgba(255, 206, 86, 1)',
                            'rgba(75, 192, 192, 1)',
                            'rgba(153, 102, 255, 1)',
                            'rgba(255, 159, 64, 1)',
                            'rgba(199, 199, 199, 1)',
                            'rgba(83, 102, 255, 1)',
                            'rgba(40, 159, 64, 1)',
                            'rgba(210, 199, 199, 1)'
                        ],
                        borderWidth: 1
                    }]
                },
                options: {
                    responsive: true,
                    maintainAspectRatio: false,
                    plugins: {
                        tooltip: {
                            callbacks: {
                                // Show compact values ("1.5K") in the tooltip.
                                label: function(context) {
                                    let label = context.label || '';
                                    if (label) {
                                        label += ': ';
                                    }
                                    label += formatChartNumber(context.parsed);
                                    return label;
                                }
                            }
                        }
                    }
                }
            });

            chartInstances['modelChart'] = modelChart;
        }
    } catch (error) {
        // Charts are non-critical: log and let the rest of the page work.
        console.error('初始化图表失败:', error);
    }
}
|
| 319 |
+
|
| 320 |
+
// Drive the auto-refresh countdown and its progress bar. While
// `autoRefreshEnabled` is true the page reloads every `refreshInterval`
// seconds; the bar animates from full to empty over that interval.
function setupAutoRefresh() {
    const progressBar = document.getElementById('refresh-progress-bar');
    const countdownElement = document.getElementById('countdown');

    // BUGFIX: guard against pages that lack the refresh widgets — the
    // original code dereferenced these elements unconditionally and
    // threw a TypeError when either was missing.
    if (!progressBar || !countdownElement) return;

    let countdownTimer;

    // Seconds remaining until the next reload.
    let countdown = refreshInterval;

    function startCountdown() {
        // Restart cleanly if a timer is already running.
        if (countdownTimer) clearInterval(countdownTimer);

        countdown = refreshInterval;
        countdownElement.textContent = countdown;

        // Reset the progress bar to full width.
        progressBar.style.width = '100%';

        if (autoRefreshEnabled) {
            // Animate the bar down to zero over the whole interval.
            progressBar.style.transition = `width ${refreshInterval}s linear`;
            progressBar.style.width = '0%';

            countdownTimer = setInterval(function() {
                countdown--;
                if (countdown <= 0) {
                    countdown = refreshInterval;
                    location.reload();
                }
                countdownElement.textContent = countdown;
            }, 1000);
        } else {
            // Auto refresh disabled: park the bar without animating.
            progressBar.style.transition = 'none';
            progressBar.style.width = '0%';
        }
    }

    // Start counting immediately.
    startCountdown();
}
|
| 362 |
+
|
| 363 |
+
// Hook up the (optional) light/dark theme toggle button. The button was
// removed from the simplified layout, but the handler is kept for reuse.
function setupThemeToggle() {
    const toggle = document.getElementById('theme-toggle-btn');
    if (!toggle) return;

    toggle.addEventListener('click', function() {
        document.body.classList.toggle('dark-mode');
        darkModeEnabled = document.body.classList.contains('dark-mode');

        // Persist the choice for the next visit.
        localStorage.setItem('theme', darkModeEnabled ? 'dark' : 'light');

        // Recolor every live chart to match the new theme.
        updateChartsTheme();
    });
}
|
| 379 |
+
|
| 380 |
+
// Apply the theme persisted in localStorage on page load
// (light mode needs no changes, so only dark mode is handled).
function loadSavedTheme() {
    if (!darkModeEnabled) return;

    document.body.classList.add('dark-mode');

    // Flip the toggle icon from moon to sun, if the button exists.
    const icon = document.querySelector('#theme-toggle-btn i');
    if (icon) {
        icon.classList.remove('fa-moon');
        icon.classList.add('fa-sun');
    }
}
|
| 391 |
+
|
| 392 |
+
// Recolor every registered chart to match the current theme.
function updateChartsTheme() {
    // Resolve the theme-dependent colors once, outside the loop.
    const gridColor = darkModeEnabled ? 'rgba(255, 255, 255, 0.1)' : 'rgba(0, 0, 0, 0.1)';
    const textColor = darkModeEnabled ? '#ddd' : '#666';

    Object.values(chartInstances).forEach(chart => {
        const scales = chart.options.scales;

        // Axis grid lines and tick labels (cartesian charts only —
        // the pie chart has no y scale and is skipped here).
        if (scales && scales.y) {
            scales.y.grid.color = gridColor;
            scales.x.grid.color = gridColor;

            scales.y.ticks.color = textColor;
            scales.x.ticks.color = textColor;
        }

        // Legend label color.
        const plugins = chart.options.plugins;
        if (plugins && plugins.legend) {
            plugins.legend.labels.color = textColor;
        }

        chart.update();
    });
}
|
| 414 |
+
|
| 415 |
+
// Play a brief "saving" animation when the save-stats button is clicked.
function setupSaveStatsButton() {
    const button = document.querySelector('.save-button');
    if (!button) return;

    button.addEventListener('click', function() {
        this.classList.add('saving');
        // Drop the class after the CSS animation has had time to play.
        setTimeout(() => {
            this.classList.remove('saving');
        }, 1000);
    });
}
|
| 428 |
+
|
| 429 |
+
// Highlight history-table rows while the pointer is over them.
function enhanceTableInteraction() {
    const rows = document.querySelectorAll('#history-table tbody tr');
    rows.forEach(row => {
        row.addEventListener('mouseenter', () => row.classList.add('highlight'));
        row.addEventListener('mouseleave', () => row.classList.remove('highlight'));
    });
}
|
| 443 |
+
|
| 444 |
+
// Keep the footer's copyright year current.
function updateFooterInfo() {
    const footer = document.querySelector('.main-footer');
    if (!footer) return;

    // First paragraph of the footer holds the copyright line.
    const copyrightText = footer.querySelector('p:first-child');
    if (!copyrightText) return;

    const currentYear = new Date().getFullYear();
    copyrightText.textContent = `© ${currentYear} 2API 统计面板 | 版本 1.0.1`;
}
|
templates/stats.html
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="zh-CN">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<meta http-equiv="refresh" content="60">
|
| 7 |
+
<title>2API 用量统计</title>
|
| 8 |
+
<link rel="stylesheet" href="{{ url_for('static', filename='css/styles.css') }}">
|
| 9 |
+
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
| 10 |
+
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-datalabels@2.0.0"></script>
|
| 11 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css">
|
| 12 |
+
<link href="https://fonts.googleapis.com/css2?family=Nunito:wght@300;400;600;700&display=swap" rel="stylesheet">
|
| 13 |
+
</head>
|
| 14 |
+
<body>
|
| 15 |
+
<div class="dashboard-wrapper">
|
| 16 |
+
<header class="main-header">
|
| 17 |
+
<div class="header-left">
|
| 18 |
+
<h1><i class="fas fa-chart-line"></i> 2API 监控面板</h1>
|
| 19 |
+
</div>
|
| 20 |
+
<div class="header-right">
|
| 21 |
+
<div class="time-info">
|
| 22 |
+
<span><i class="fas fa-clock"></i> 最后更新: {{ current_time }}</span>
|
| 23 |
+
<span><i class="fas fa-save"></i> 最后保存: {{ stats.last_saved|format_datetime if stats.last_saved != "从未保存" else "从未保存" }}</span>
|
| 24 |
+
</div>
|
| 25 |
+
<div class="actions">
|
| 26 |
+
<form action="/save_stats" method="post">
|
| 27 |
+
<button type="submit" class="save-button" title="保存统计数据"><i class="fas fa-save"></i></button>
|
| 28 |
+
</form>
|
| 29 |
+
<button id="refresh-btn" class="refresh-button" title="刷新数据"><i class="fas fa-sync-alt"></i></button>
|
| 30 |
+
</div>
|
| 31 |
+
</div>
|
| 32 |
+
</header>
|
| 33 |
+
|
| 34 |
+
<div class="main-content">
|
| 35 |
+
<div class="auto-refresh-bar">
|
| 36 |
+
<div class="refresh-progress">
|
| 37 |
+
<div class="progress-bar" id="refresh-progress-bar" style="width: 100%;"></div>
|
| 38 |
+
</div>
|
| 39 |
+
<div class="refresh-info">
|
| 40 |
+
<span>数据将在 <span id="countdown">60</span> 秒后自动刷新</span>
|
| 41 |
+
</div>
|
| 42 |
+
</div>
|
| 43 |
+
|
| 44 |
+
<!-- 统计概览部分 -->
|
| 45 |
+
<section id="dashboard" class="dashboard-section active-section">
|
| 46 |
+
<div class="section-header">
|
| 47 |
+
<h2><i class="fas fa-tachometer-alt"></i> 统计概览</h2>
|
| 48 |
+
</div>
|
| 49 |
+
|
| 50 |
+
<div class="stats-overview">
|
| 51 |
+
<div class="stats-card primary">
|
| 52 |
+
<div class="stats-icon">
|
| 53 |
+
<i class="fas fa-server"></i>
|
| 54 |
+
</div>
|
| 55 |
+
<div class="stats-content">
|
| 56 |
+
<h3>总请求数</h3>
|
| 57 |
+
<div class="stats-number">{{ stats.total_requests|format_number }}</div>
|
| 58 |
+
<div class="stats-trend positive">
|
| 59 |
+
<i class="fas fa-arrow-up"></i>
|
| 60 |
+
{{ stats.growth_rate|round(2) }}% 今日
|
| 61 |
+
</div>
|
| 62 |
+
</div>
|
| 63 |
+
</div>
|
| 64 |
+
|
| 65 |
+
<div class="stats-card success">
|
| 66 |
+
<div class="stats-icon">
|
| 67 |
+
<i class="fas fa-check-circle"></i>
|
| 68 |
+
</div>
|
| 69 |
+
<div class="stats-content">
|
| 70 |
+
<h3>成功率</h3>
|
| 71 |
+
<div class="stats-number">{{ stats.success_rate }}%</div>
|
| 72 |
+
<div class="stats-detail">
|
| 73 |
+
成功: {{ stats.successful_requests|format_number }} / 失败: {{ stats.failed_requests|format_number }}
|
| 74 |
+
</div>
|
| 75 |
+
</div>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
<div class="stats-card info">
|
| 79 |
+
<div class="stats-icon">
|
| 80 |
+
<i class="fas fa-bolt"></i>
|
| 81 |
+
</div>
|
| 82 |
+
<div class="stats-content">
|
| 83 |
+
<h3>平均响应时间</h3>
|
| 84 |
+
<div class="stats-number">
|
| 85 |
+
{{ stats.avg_duration|format_duration }}
|
| 86 |
+
</div>
|
| 87 |
+
<div class="stats-detail">
|
| 88 |
+
最快: {{ stats.min_duration|format_duration }}
|
| 89 |
+
</div>
|
| 90 |
+
</div>
|
| 91 |
+
</div>
|
| 92 |
+
|
| 93 |
+
<div class="stats-card warning">
|
| 94 |
+
<div class="stats-icon">
|
| 95 |
+
<i class="fas fa-coins"></i>
|
| 96 |
+
</div>
|
| 97 |
+
<div class="stats-content">
|
| 98 |
+
<h3>总 Tokens</h3>
|
| 99 |
+
<div class="stats-number">{{ stats.total_tokens|format_number }}</div>
|
| 100 |
+
<div class="stats-detail">
|
| 101 |
+
提示: {{ stats.total_prompt_tokens|format_number }} / 完成: {{ stats.total_completion_tokens|format_number }}
|
| 102 |
+
</div>
|
| 103 |
+
</div>
|
| 104 |
+
</div>
|
| 105 |
+
|
| 106 |
+
<div class="stats-card danger">
|
| 107 |
+
<div class="stats-icon">
|
| 108 |
+
<i class="fas fa-dollar-sign"></i>
|
| 109 |
+
</div>
|
| 110 |
+
<div class="stats-content">
|
| 111 |
+
<h3>估算成本</h3>
|
| 112 |
+
<div class="stats-number">
|
| 113 |
+
${{ stats.total_cost | round(2) }}
|
| 114 |
+
</div>
|
| 115 |
+
<div class="stats-detail">
|
| 116 |
+
平均: ${{ stats.avg_cost | round(2) }}/请求
|
| 117 |
+
</div>
|
| 118 |
+
</div>
|
| 119 |
+
</div>
|
| 120 |
+
|
| 121 |
+
<div class="stats-card secondary">
|
| 122 |
+
<div class="stats-icon">
|
| 123 |
+
<i class="fas fa-robot"></i>
|
| 124 |
+
</div>
|
| 125 |
+
<div class="stats-content">
|
| 126 |
+
<h3>模型使用</h3>
|
| 127 |
+
<div class="stats-number">{{ stats.model_usage.keys()|list|length }}</div>
|
| 128 |
+
<div class="stats-detail">
|
| 129 |
+
{% if stats.top_model %}
|
| 130 |
+
最常用: {{ stats.top_model[0] }} ({{ stats.top_model[1] }}次)
|
| 131 |
+
{% else %}
|
| 132 |
+
暂无模型使用数据
|
| 133 |
+
{% endif %}
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
</div>
|
| 137 |
+
</div>
|
| 138 |
+
|
| 139 |
+
<!-- 简化的图表部分 -->
|
| 140 |
+
<div class="dashboard-charts">
|
| 141 |
+
<div class="chart-row">
|
| 142 |
+
<div class="chart-card">
|
| 143 |
+
<div class="chart-header">
|
| 144 |
+
<h3><i class="fas fa-calendar-day"></i> 每日请求趋势</h3>
|
| 145 |
+
</div>
|
| 146 |
+
<div class="chart-body">
|
| 147 |
+
<canvas id="dailyChart"
|
| 148 |
+
data-labels='{{ stats.daily_usage.keys()|list|tojson }}'
|
| 149 |
+
data-values='{{ stats.daily_usage.values()|list|tojson }}'></canvas>
|
| 150 |
+
</div>
|
| 151 |
+
</div>
|
| 152 |
+
|
| 153 |
+
<div class="chart-card">
|
| 154 |
+
<div class="chart-header">
|
| 155 |
+
<h3><i class="fas fa-robot"></i> 模型使用分布</h3>
|
| 156 |
+
</div>
|
| 157 |
+
<div class="chart-body">
|
| 158 |
+
<canvas id="modelChart"
|
| 159 |
+
data-labels='{{ stats.model_usage.keys()|list|tojson }}'
|
| 160 |
+
data-values='{{ stats.model_usage.values()|list|tojson }}'></canvas>
|
| 161 |
+
</div>
|
| 162 |
+
</div>
|
| 163 |
+
</div>
|
| 164 |
+
</div>
|
| 165 |
+
</section>
|
| 166 |
+
|
| 167 |
+
<!-- 简化的请求历史部分 -->
|
| 168 |
+
<section id="history" class="dashboard-section">
|
| 169 |
+
<div class="section-header">
|
| 170 |
+
<h2><i class="fas fa-history"></i> 请求历史</h2>
|
| 171 |
+
<div class="history-actions">
|
| 172 |
+
<div class="search-box">
|
| 173 |
+
<input type="text" id="history-search" placeholder="搜索请求...">
|
| 174 |
+
<i class="fas fa-search"></i>
|
| 175 |
+
</div>
|
| 176 |
+
</div>
|
| 177 |
+
</div>
|
| 178 |
+
|
| 179 |
+
<div class="table-container">
|
| 180 |
+
<table id="history-table" class="data-table">
|
| 181 |
+
<thead>
|
| 182 |
+
<tr>
|
| 183 |
+
<th data-sort="id">请求ID <i class="fas fa-sort"></i></th>
|
| 184 |
+
<th data-sort="timestamp">时间 <i class="fas fa-sort"></i></th>
|
| 185 |
+
<th data-sort="model">模型 <i class="fas fa-sort"></i></th>
|
| 186 |
+
<th data-sort="account">账户 <i class="fas fa-sort"></i></th>
|
| 187 |
+
<th data-sort="status">状态 <i class="fas fa-sort"></i></th>
|
| 188 |
+
<th data-sort="duration">耗时(ms) <i class="fas fa-sort"></i></th>
|
| 189 |
+
<th data-sort="total">总Tokens <i class="fas fa-sort"></i></th>
|
| 190 |
+
</tr>
|
| 191 |
+
</thead>
|
| 192 |
+
<tbody>
|
| 193 |
+
{% for req in stats.request_history|reverse %}
|
| 194 |
+
<tr data-model="{{ req.model }}" data-status="{{ 'success' if req.success else 'fail' }}" data-id="{{ req.id }}">
|
| 195 |
+
<td title="{{ req.id }}">{{ req.id[:8] }}...</td>
|
| 196 |
+
<td>{{ req.timestamp|format_datetime }}</td>
|
| 197 |
+
<td><span class="model-badge small">{{ req.model }}</span></td>
|
| 198 |
+
<td title="{{ req.account }}">
|
| 199 |
+
<div class="account-cell">
|
| 200 |
+
<span class="account-avatar small">{{ req.account[0]|upper }}</span>
|
| 201 |
+
<span>{{ req.account.split('@')[0] }}</span>
|
| 202 |
+
</div>
|
| 203 |
+
</td>
|
| 204 |
+
<td class="{{ 'success' if req.success else 'fail' }}">
|
| 205 |
+
<span class="status-badge {{ 'success' if req.success else 'fail' }}">
|
| 206 |
+
<i class="fas {{ 'fa-check-circle' if req.success else 'fa-times-circle' }}"></i>
|
| 207 |
+
{{ '成功' if req.success else '失败' }}
|
| 208 |
+
</span>
|
| 209 |
+
</td>
|
| 210 |
+
<td>{{ req.duration_ms|format_duration }}</td>
|
| 211 |
+
<td>{{ (req.total_tokens if req.total_tokens is defined else req.estimated_total_tokens if req.estimated_total_tokens is defined else '-')|format_number if (req.total_tokens is defined or req.estimated_total_tokens is defined) else '-' }}</td>
|
| 212 |
+
</tr>
|
| 213 |
+
{% endfor %}
|
| 214 |
+
</tbody>
|
| 215 |
+
</table>
|
| 216 |
+
</div>
|
| 217 |
+
<div class="pagination">
|
| 218 |
+
<button id="prev-page" disabled><i class="fas fa-chevron-left"></i> 上一页</button>
|
| 219 |
+
<span id="page-info">第 <span id="current-page">1</span> 页,共 <span id="total-pages">1</span> 页</span>
|
| 220 |
+
<button id="next-page"><i class="fas fa-chevron-right"></i> 下一页</button>
|
| 221 |
+
</div>
|
| 222 |
+
</section>
|
| 223 |
+
|
| 224 |
+
<footer class="main-footer">
|
| 225 |
+
<div class="footer-content">
|
| 226 |
+
<div class="footer-logo">
|
| 227 |
+
<h3>2API <span>统计面板</span></h3>
|
| 228 |
+
</div>
|
| 229 |
+
<div class="footer-info">
|
| 230 |
+
<p>© 2025 2API 统计面板 | 版本 1.0.1</p>
|
| 231 |
+
<p>数据每60秒自动刷新</p>
|
| 232 |
+
</div>
|
| 233 |
+
</div>
|
| 234 |
+
</footer>
|
| 235 |
+
</div>
|
| 236 |
+
</div>
|
| 237 |
+
|
| 238 |
+
<script src="{{ url_for('static', filename='js/scripts.js') }}"></script>
|
| 239 |
+
</body>
|
| 240 |
+
</html>
|
utils.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import tiktoken
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Dict, Any, Optional, Tuple
|
| 8 |
+
|
| 9 |
+
# 配置日志
|
| 10 |
+
def setup_logging():
    """Configure the logging system and return the shared application logger.

    Reads ``LOG_PATH``, ``LOG_LEVEL`` and ``LOG_FORMAT`` from the environment
    and installs a stream handler plus, when possible, a UTF-8 file handler.

    Returns:
        logging.Logger: the '2api' logger used throughout the project.
    """
    log_path = os.environ.get("LOG_PATH", "/tmp/2api.log")
    log_level_str = os.environ.get("LOG_LEVEL", "INFO").upper()
    # Unknown level names fall back to INFO instead of raising.
    log_level = getattr(logging, log_level_str, logging.INFO)
    log_format = os.environ.get("LOG_FORMAT", "%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    handlers = [logging.StreamHandler()]
    try:
        # LOG_PATH may be unwritable (read-only container filesystem, missing
        # directory). Logging setup must never crash the app at import time,
        # so fall back to console-only logging on failure.
        handlers.append(logging.FileHandler(log_path, encoding='utf-8'))
    except OSError as exc:
        print(f"WARNING: cannot open log file {log_path}: {exc}")

    logging.basicConfig(
        level=log_level,
        format=log_format,
        handlers=handlers
    )
    return logging.getLogger('2api')

# Module-level logger shared by the rest of the package.
logger = setup_logging()
|
| 27 |
+
|
| 28 |
+
def load_config():
    """Load configuration from config.json when present.

    The file path defaults to ``config.json`` next to this module and may be
    overridden with the ``CONFIG_FILE_PATH`` environment variable. When the
    file is absent or unreadable an empty dict is returned, so callers fall
    back to environment-variable configuration.

    Returns:
        dict: parsed configuration, or ``{}`` on any failure.
    """
    default_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.json')
    config_file = os.environ.get("CONFIG_FILE_PATH", default_config_path)

    # No file on disk: nothing to load, silently use env-based defaults.
    if not os.path.exists(config_file):
        return {}

    try:
        with open(config_file, 'r', encoding='utf-8') as fh:
            loaded = json.load(fh)
    except (json.JSONDecodeError, IOError) as e:
        logger.error(f"加载配置文件失败: {e}")
        return {}

    logger.info(f"已从 {config_file} 加载配置")
    return loaded
|
| 44 |
+
|
| 45 |
+
def mask_email(email: str) -> str:
    """Mask the middle of an e-mail address to protect privacy.

    Short usernames (<= 3 chars) keep only the first character; longer ones
    keep the first and last characters. Invalid input yields a fixed marker.
    """
    if not email or '@' not in email:
        return "无效邮箱"

    username, domain, *_ = email.split('@')

    keep_tail = len(username) > 3
    if keep_tail:
        masked = f"{username[0]}{'*' * (len(username) - 2)}{username[-1]}"
    else:
        masked = f"{username[0]}{'*' * (len(username) - 1)}"

    return f"{masked}@{domain}"
|
| 60 |
+
|
| 61 |
+
def generate_request_id() -> str:
    """Return a unique OpenAI-style request id: 'chatcmpl-' + 32 hex chars."""
    return "chatcmpl-" + os.urandom(16).hex()
|
| 64 |
+
|
| 65 |
+
def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
    """Count the number of tokens in a piece of text.

    Args:
        text: text to measure; ``None`` becomes "" and non-str values are
            coerced with ``str()``.
        model: model name used to select a tiktoken encoder
            (default "gpt-3.5-turbo").

    Returns:
        int: token count; falls back to a rough chars/4 estimate when the
        encoder fails for any reason.
    """
    # Type guard: never let None / non-str input reach the encoder.
    if text is None:
        text = ""
    elif not isinstance(text, str):
        text = str(text)

    try:
        if "gpt-4" in model:
            encoder = tiktoken.encoding_for_model("gpt-4")
        elif "gpt-3.5" in model:
            encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")
        else:
            # Claude and any unknown models both use the cl100k_base encoder.
            encoder = tiktoken.get_encoding("cl100k_base")

        return len(encoder.encode(text))
    except Exception as e:
        logger.error(f"计算token数量时出错: {e}")
        # Heuristic fallback: roughly 4 characters per token.
        return len(text) // 4
|
| 101 |
+
|
| 102 |
+
def count_message_tokens(messages: list, model: str = "gpt-3.5-turbo") -> Tuple[int, int, int]:
    """Count tokens for an OpenAI-format message list.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` dicts; ``None``
            or non-list input is treated as an empty list.
        model: model name forwarded to :func:`count_tokens`.

    Returns:
        Tuple[int, int, int]: (prompt tokens, completion tokens, total tokens);
        ``(0, 0, 0)`` on unexpected errors.
    """
    # Type guard against None / non-list input.
    if messages is None:
        messages = []
    elif not isinstance(messages, list):
        logger.warning(f"count_message_tokens 收到非列表类型的消息: {type(messages)}")
        messages = []

    prompt_tokens = 0
    completion_tokens = 0

    try:
        for message in messages:
            # Skip anything that is not a message dict.
            if not isinstance(message, dict):
                logger.warning(f"跳过非字典类型的消息: {type(message)}")
                continue

            role = message.get('role', '')
            content = message.get('content', '')

            # NOTE(review): messages with an empty role or content contribute
            # no tokens at all (not even framing overhead) — confirm intended.
            if not (role and content):
                continue

            # Fixed per-message overhead: 4 for message framing, 1 for the
            # role name, 2 for the end-of-message marker.
            prompt_tokens += 4 + 1 + 2
            prompt_tokens += count_tokens(content, model)

            # Assistant turns also count toward completion tokens.
            if role == 'assistant':
                completion_tokens += count_tokens(content, model)

        return prompt_tokens, completion_tokens, prompt_tokens + completion_tokens
    except Exception as e:
        logger.error(f"计算消息token数量时出错: {e}")
        # Safe defaults on failure.
        return 0, 0, 0
|