Spaces:
Running
Running
File size: 73,889 Bytes
f0bf113 f4286d8 4f80088 f4286d8 990d959 bc4a196 f4286d8 f0bf113 bc4a196 da9c7fc 990d959 f0bf113 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 0a78d0a f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 f0bf113 f4286d8 f0bf113 f4286d8 2a40305 f0bf113 4f80088 f0bf113 2a40305 f0bf113 f4286d8 bc4a196 f4286d8 990d959 f4286d8 bc4a196 f4286d8 da9c7fc bc4a196 f4286d8 bc4a196 0a78d0a f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 f0bf113 bc4a196 990d959 eb60932 990d959 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 da9c7fc 990d959 f0bf113 990d959 da9c7fc f4286d8 990d959 f4286d8 f0bf113 2a40305 bc4a196 2a40305 bc4a196 f0bf113 bc4a196 bb2daf6 bc4a196 bb2daf6 bc4a196 f0bf113 bc4a196 bb2daf6 bc4a196 bb2daf6 bc4a196 f0bf113 bc4a196 bb2daf6 bc4a196 bb2daf6 bc4a196 f0bf113 bc4a196 f0bf113 bc4a196 f0bf113 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 0a78d0a f4286d8 0a78d0a bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 da9c7fc bc4a196 0a78d0a bc4a196 0a78d0a bc4a196 0a78d0a bc4a196 40a5b01 bc4a196 0a78d0a bc4a196 40a5b01 0a78d0a 4f80088 f0bf113 4f80088 f0bf113 4f80088 f0bf113 4f80088 f0bf113 4f80088 40a5b01 4f80088 40a5b01 4f80088 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 f0bf113 bc4a196 4f80088 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 4f80088 990d959 bc4a196 990d959 eb60932 990d959 eb60932 990d959 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 bc4a196 f4286d8 da9c7fc 0a78d0a eb60932 0a78d0a 990d959 0a78d0a 990d959 0a78d0a f4286d8 d95d30f f4286d8 eb60932 0a78d0a da9c7fc 0a78d0a da9c7fc 0a78d0a da9c7fc f4286d8 0a78d0a f4286d8 bc4a196 f4286d8 eb60932 f4286d8 8ed6906 f4286d8 8ed6906 f4286d8 bc4a196 f4286d8 4f80088 f4286d8 4f80088 f4286d8 40a5b01 f4286d8 40a5b01 f4286d8 40a5b01 
f4286d8 40a5b01 f4286d8 990d959 f0bf113 990d959 f0bf113 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 f4286d8 990d959 bc4a196 f4286d8 990d959 f4286d8 f0bf113 f4286d8 f0bf113 f4286d8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 
1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 |
import json, time, os, asyncio, uuid, ssl, re, yaml, shutil
from datetime import datetime, timezone, timedelta
from typing import List, Optional, Union, Dict, Any
from pathlib import Path
import logging
from dotenv import load_dotenv
import httpx
import aiofiles
from fastapi import FastAPI, HTTPException, Header, Request, Body, Form
from fastapi.responses import StreamingResponse, HTMLResponse, JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from util.streaming_parser import parse_json_array_stream_async
from collections import deque
from threading import Lock
# ---------- Data directory configuration ----------
# Auto-detect the environment: HF Spaces Pro exposes /data; use ./data locally.
if os.path.exists("/data"):
    DATA_DIR = "/data"  # HF Pro persistent storage
    logger_prefix = "[HF-PRO]"
else:
    DATA_DIR = "./data"  # local persistent storage
    logger_prefix = "[LOCAL]"
# Ensure the data directory exists before deriving file paths from it.
os.makedirs(DATA_DIR, exist_ok=True)
# Unified data file paths.
ACCOUNTS_FILE = os.path.join(DATA_DIR, "accounts.json")
SETTINGS_FILE = os.path.join(DATA_DIR, "settings.yaml")
STATS_FILE = os.path.join(DATA_DIR, "stats.json")
IMAGE_DIR = os.path.join(DATA_DIR, "images")
# Ensure the image directory exists (it is mounted as static files later).
os.makedirs(IMAGE_DIR, exist_ok=True)
# 导入认证模块
from core.auth import verify_api_key
from core.session_auth import is_logged_in, login_user, logout_user, require_login, generate_session_secret
# 导入核心模块
from core.message import (
get_conversation_key,
parse_last_message,
build_full_context_text
)
from core.google_api import (
get_common_headers,
create_google_session,
upload_context_file,
get_session_file_metadata,
download_image_with_jwt,
save_image_to_hf
)
from core.account import (
AccountManager,
MultiAccountManager,
format_account_expiration,
load_multi_account_config,
load_accounts_from_source,
update_accounts_config as _update_accounts_config,
delete_account as _delete_account,
update_account_disabled_status as _update_account_disabled_status
)
# 导入 Uptime 追踪器
from core import uptime as uptime_tracker
# 导入配置管理和模板系统
from fastapi.templating import Jinja2Templates
from core.config import config_manager, config
from util.template_helpers import prepare_admin_template_data
# ---------- Logging configuration ----------
# In-memory log ring buffer (keeps the most recent 3000 entries; cleared on restart).
log_buffer = deque(maxlen=3000)
log_lock = Lock()
# Statistics persistence.
stats_lock = asyncio.Lock()  # async lock: stats are accessed from coroutines
async def load_stats():
    """Load persisted statistics from STATS_FILE (async).

    Returns the parsed stats dict, or a fresh default structure when the
    file is missing or unreadable. Deliberately best-effort: startup must
    not fail on a corrupt stats file.
    """
    try:
        if os.path.exists(STATS_FILE):
            async with aiofiles.open(STATS_FILE, 'r', encoding='utf-8') as f:
                content = await f.read()
                return json.loads(content)
    # Narrowed from a blanket Exception: OSError covers unreadable files,
    # ValueError covers json.JSONDecodeError and bad encodings.
    except (OSError, ValueError):
        pass
    return {
        "total_visitors": 0,
        "total_requests": 0,
        "request_timestamps": [],    # request timestamps within the last hour
        "visitor_ips": {},           # {ip: timestamp} of recent visitors
        "account_conversations": {}  # {account_id: conversation_count}
    }
async def save_stats(stats):
    """Persist statistics to STATS_FILE asynchronously (avoids blocking the event loop)."""
    try:
        async with aiofiles.open(STATS_FILE, mode='w', encoding='utf-8') as fh:
            serialized = json.dumps(stats, ensure_ascii=False, indent=2)
            await fh.write(serialized)
    except Exception as e:
        logger.error(f"[STATS] 保存统计数据失败: {str(e)[:50]}")
# Initial (empty) statistics; replaced with the persisted copy during startup.
global_stats = {
    "total_visitors": 0,
    "total_requests": 0,
    "request_timestamps": [],    # request timestamps within the last hour
    "visitor_ips": {},           # {ip: last-visit timestamp}
    "account_conversations": {}  # {account_id: conversation_count}
}
class MemoryLogHandler(logging.Handler):
    """Logging handler that mirrors records into the in-memory ring buffer.

    Each entry stores a Beijing-time (UTC+8) timestamp, the level name and
    the interpolated message; appends happen under log_lock so readers see
    a consistent buffer.
    """
    def emit(self, record):
        # The old `log_entry = self.format(record)` was dead code: the
        # formatted string was never stored (the buffer uses getMessage()),
        # so the call has been removed.
        # Convert the record's creation time to Beijing time (UTC+8).
        beijing_tz = timezone(timedelta(hours=8))
        beijing_time = datetime.fromtimestamp(record.created, tz=beijing_tz)
        with log_lock:
            log_buffer.append({
                "time": beijing_time.strftime("%Y-%m-%d %H:%M:%S"),
                "level": record.levelname,
                "message": record.getMessage()
            })
# Configure logging.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger("gemini")
# Attach the in-memory handler so the admin UI can show recent log lines.
memory_handler = MemoryLogHandler()
memory_handler.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s | %(message)s", datefmt="%H:%M:%S"))
logger.addHandler(memory_handler)
# ---------- Configuration (unified config system) ----------
# All settings go through config_manager; precedence: env var > YAML > default.
TIMEOUT_SECONDS = 600
API_KEY = config.basic.api_key
PATH_PREFIX = config.security.path_prefix
ADMIN_KEY = config.security.admin_key
PROXY = config.basic.proxy
BASE_URL = config.basic.base_url
SESSION_SECRET_KEY = config.security.session_secret_key
SESSION_EXPIRE_HOURS = config.session.expire_hours
# ---------- Public display settings ----------
LOGO_URL = config.public_display.logo_url
CHAT_URL = config.public_display.chat_url
# ---------- Image generation settings ----------
IMAGE_GENERATION_ENABLED = config.image_generation.enabled
IMAGE_GENERATION_MODELS = config.image_generation.supported_models
# ---------- Retry tuning ----------
MAX_NEW_SESSION_TRIES = config.retry.max_new_session_tries
MAX_REQUEST_RETRIES = config.retry.max_request_retries
MAX_ACCOUNT_SWITCH_TRIES = config.retry.max_account_switch_tries
ACCOUNT_FAILURE_THRESHOLD = config.retry.account_failure_threshold
RATE_LIMIT_COOLDOWN_SECONDS = config.retry.rate_limit_cooldown_seconds
SESSION_CACHE_TTL_SECONDS = config.retry.session_cache_ttl_seconds
# ---------- Model name mapping ----------
# Maps exposed model names to upstream model ids; None means auto-select.
MODEL_MAPPING = {
    "gemini-auto": None,
    "gemini-2.5-flash": "gemini-2.5-flash",
    "gemini-2.5-pro": "gemini-2.5-pro",
    "gemini-3-flash-preview": "gemini-3-flash-preview",
    "gemini-3-pro-preview": "gemini-3-pro-preview"
}
# ---------- HTTP client ----------
# Shared AsyncClient for all upstream calls.
# NOTE(review): verify=False disables TLS certificate verification —
# presumably needed for the proxied upstream; confirm this is intentional.
http_client = httpx.AsyncClient(
    proxy=PROXY or None,
    verify=False,
    http2=False,
    timeout=httpx.Timeout(TIMEOUT_SECONDS, connect=60.0),
    limits=httpx.Limits(
        max_keepalive_connections=100,  # raised 5x: 20 -> 100
        max_connections=200             # raised 4x: 50 -> 200
    )
)
# ---------- Utility functions ----------
def get_base_url(request: Request) -> str:
    """Return the full base URL (explicit BASE_URL wins, else derived from the request)."""
    # An explicitly configured BASE_URL always takes precedence.
    if BASE_URL:
        return BASE_URL.rstrip("/")
    # Derive from the incoming request, honouring reverse-proxy headers.
    headers = request.headers
    scheme = headers.get("x-forwarded-proto", request.url.scheme)
    host = headers.get("x-forwarded-host", headers.get("host"))
    return f"{scheme}://{host}"
# ---------- Constants ----------
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36"
# ---------- Multi-account support ----------
# (AccountConfig, AccountManager, MultiAccountManager moved to core/account.py)
# ---------- Config file management ----------
# (config management helpers moved to core/account.py)
# Initialize the multi-account manager from the persisted account config.
multi_account_mgr = load_multi_account_config(
    http_client,
    USER_AGENT,
    ACCOUNT_FAILURE_THRESHOLD,
    RATE_LIMIT_COOLDOWN_SECONDS,
    SESSION_CACHE_TTL_SECONDS,
    global_stats
)
# Validate required environment configuration: refuse to start without ADMIN_KEY.
if not ADMIN_KEY:
    logger.error("[SYSTEM] 未配置 ADMIN_KEY 环境变量,请设置后重启")
    import sys
    sys.exit(1)
# Startup banner: show which endpoints are active (prefixed vs default paths).
if PATH_PREFIX:
    logger.info(f"[SYSTEM] 路径前缀已配置: {PATH_PREFIX[:4]}****")
    logger.info(f"[SYSTEM] API端点: /{PATH_PREFIX}/v1/chat/completions")
    logger.info(f"[SYSTEM] 管理端点: /{PATH_PREFIX}/")
else:
    logger.info("[SYSTEM] 未配置路径前缀,使用默认路径")
    logger.info("[SYSTEM] API端点: /v1/chat/completions")
    logger.info("[SYSTEM] 管理端点: /admin/")
logger.info("[SYSTEM] 公开端点: /public/log/html, /public/stats, /public/uptime/html")
logger.info(f"[SYSTEM] Session过期时间: {SESSION_EXPIRE_HOURS}小时")
logger.info("[SYSTEM] 系统初始化完成")
# ---------- JWT management ----------
# (JWTManager moved to core/jwt.py)
# ---------- Session & file management ----------
# (Google API helpers moved to core/google_api.py)
# ---------- Message handling ----------
# (message helpers moved to core/message.py)
# ---------- OpenAI-compatible application ----------
app = FastAPI(title="Gemini-Business OpenAI Gateway")
# ---------- Template system ----------
templates = Jinja2Templates(directory="templates")
# Development mode: enable template hot reload.
if os.getenv("ENV") == "development":
    templates.env.auto_reload = True
    logger.info("[SYSTEM] 模板热更新已启用(开发模式)")
# Static assets.
app.mount("/static", StaticFiles(directory="static"), name="static")
# ---------- Session middleware ----------
from starlette.middleware.sessions import SessionMiddleware
app.add_middleware(
    SessionMiddleware,
    secret_key=SESSION_SECRET_KEY,
    max_age=SESSION_EXPIRE_HOURS * 3600,  # hours -> seconds
    same_site="lax",
    https_only=False  # False is fine for local dev; True recommended in production
)
# ---------- Uptime tracking middleware ----------
@app.middleware("http")
async def track_uptime_middleware(request: Request, call_next):
    """Record per-request success/failure for uptime monitoring.

    Only API traffic is tracked: static images, public pages and favicon
    requests pass straight through. (Removed the unused `start_time` local
    and the dead initial `success = False` assignment.)
    """
    path = request.url.path
    if path.startswith("/images/") or path.startswith("/public/") or path.startswith("/favicon"):
        return await call_next(request)
    model = None
    try:
        response = await call_next(request)
        success = response.status_code < 400
        # Handlers may stash the requested model on request.state.
        if hasattr(request.state, "model"):
            model = request.state.model
        # Record overall API health, plus per-model health when known.
        uptime_tracker.record_request("api_service", success)
        if model and model in uptime_tracker.SUPPORTED_MODELS:
            uptime_tracker.record_request(model, success)
        return response
    except Exception:
        # Request failed — the model may have been set before the exception.
        if hasattr(request.state, "model"):
            model = request.state.model
        uptime_tracker.record_request("api_service", False)
        if model and model in uptime_tracker.SUPPORTED_MODELS:
            uptime_tracker.record_request(model, False)
        raise
# ---------- Image static serving ----------
os.makedirs(IMAGE_DIR, exist_ok=True)
app.mount("/images", StaticFiles(directory=IMAGE_DIR), name="images")
if IMAGE_DIR == "/data/images":
    logger.info(f"[SYSTEM] 图片静态服务已启用: /images/ -> {IMAGE_DIR} (HF Pro持久化)")
else:
    logger.info(f"[SYSTEM] 图片静态服务已启用: /images/ -> {IMAGE_DIR} (本地持久化)")
# ---------- Background task startup ----------
@app.on_event("startup")
async def startup_event():
    """Initialize persisted state and background tasks at application start."""
    global global_stats
    # One-time migration: copy a legacy accounts.json from the repo root into
    # the data directory, but only if the new location is still empty.
    old_accounts = "accounts.json"
    if os.path.exists(old_accounts) and not os.path.exists(ACCOUNTS_FILE):
        try:
            shutil.copy(old_accounts, ACCOUNTS_FILE)
            logger.info(f"{logger_prefix} 已迁移 {old_accounts} -> {ACCOUNTS_FILE}")
        except Exception as e:
            logger.warning(f"{logger_prefix} 文件迁移失败: {e}")
    # Load persisted statistics.
    # NOTE(review): multi_account_mgr was constructed with the *initial*
    # global_stats dict at import time; rebinding the global here may leave
    # the manager pointing at the stale dict — verify.
    global_stats = await load_stats()
    logger.info(f"[SYSTEM] 统计数据已加载: {global_stats['total_requests']} 次请求, {global_stats['total_visitors']} 位访客")
    # Start the session-cache cleanup loop.
    asyncio.create_task(multi_account_mgr.start_background_cleanup())
    logger.info("[SYSTEM] 后台缓存清理任务已启动(间隔: 5分钟)")
    # Start the uptime aggregation loop.
    asyncio.create_task(uptime_tracker.uptime_aggregation_task())
    logger.info("[SYSTEM] Uptime 数据聚合任务已启动(间隔: 240秒)")
# ---------- Log sanitization ----------
def get_sanitized_logs(limit: int = 100) -> list:
    """Return sanitized log entries grouped by request id with key events extracted.

    Groups buffered log lines by their ``[req_xxx]`` tag, attaches orphan
    lines (e.g. account selection) to the nearest following request, and
    condenses each request into a short event list safe for public display.

    Args:
        limit: maximum number of request groups returned (newest first).
    """
    # Snapshot the ring buffer under the lock so iteration is consistent.
    with log_lock:
        logs = list(log_buffer)
    # Group by request id (lines either carry a [req_xxx] tag or not).
    request_logs = {}
    orphan_logs = []  # lines without a request_id (e.g. account selection)
    for log in logs:
        message = log["message"]
        req_match = re.search(r'\[req_([a-z0-9]+)\]', message)
        if req_match:
            request_id = req_match.group(1)
            if request_id not in request_logs:
                request_logs[request_id] = []
            request_logs[request_id].append(log)
        else:
            # No request id (e.g. account selection) — hold for association below.
            orphan_logs.append(log)
    # Associate orphan lines (e.g. account selection) with a request:
    # each orphan is attached to the time-wise closest request that starts after it.
    for orphan in orphan_logs:
        orphan_time = orphan["time"]
        # Find the closest request starting at or after the orphan.
        closest_request_id = None
        min_time_diff = None
        for request_id, req_logs in request_logs.items():
            if req_logs:
                first_log_time = req_logs[0]["time"]
                # The orphan should precede (or coincide with) the request.
                if first_log_time >= orphan_time:
                    if min_time_diff is None or first_log_time < min_time_diff:
                        min_time_diff = first_log_time
                        closest_request_id = request_id
        # Prepend the orphan to the matched request's line list.
        if closest_request_id:
            request_logs[closest_request_id].insert(0, orphan)
    # Extract key events for every request group.
    sanitized = []
    for request_id, req_logs in request_logs.items():
        # Facts collected while scanning this request's lines.
        model = None
        message_count = None
        retry_events = []
        final_status = "in_progress"
        duration = None
        start_time = req_logs[0]["time"]
        # Walk all lines belonging to this request.
        for log in req_logs:
            message = log["message"]
            # Model name and message count from the "request received" line.
            if '收到请求:' in message and not model:
                model_match = re.search(r'收到请求: ([^ |]+)', message)
                if model_match:
                    model = model_match.group(1)
                count_match = re.search(r'(\d+)条消息', message)
                if count_match:
                    message_count = int(count_match.group(1))
            # Retry-related events (failed attempts, account switch/selection).
            # "正在重试" lines are skipped on purpose — they pair with "失败 (尝试".
            if any(keyword in message for keyword in ['切换账户', '选择账户', '失败 (尝试']):
                retry_events.append({
                    "time": log["time"],
                    "message": message
                })
            # Completed response (highest priority — final success overrides
            # any intermediate errors).
            if '响应完成:' in message:
                time_match = re.search(r'响应完成: ([\d.]+)秒', message)
                if time_match:
                    duration = time_match.group(1) + 's'
                final_status = "success"
            # Non-streaming completion.
            if '非流式响应完成' in message:
                final_status = "success"
            # Failure detection (only while not already successful).
            if final_status != "success" and (log['level'] == 'ERROR' or '失败' in message):
                final_status = "error"
            # Timeout detection (only while not already successful).
            if final_status != "success" and '超时' in message:
                final_status = "timeout"
        # Skip still-in-progress groups that carry no model info.
        if not model and final_status == "in_progress":
            continue
        # Build the key-event list.
        events = []
        # 1. Conversation start.
        if model:
            events.append({
                "time": start_time,
                "type": "start",
                "content": f"{model} | {message_count}条消息" if message_count else model
            })
        else:
            # No model info, but an error occurred — still show the group.
            events.append({
                "time": start_time,
                "type": "start",
                "content": "请求处理中"
            })
        # 2. Retry events.
        failure_count = 0         # failed-attempt counter
        account_select_count = 0  # account-selection counter
        for i, retry in enumerate(retry_events):
            msg = retry["message"]
            # Classify the retry event (matched in priority order).
            if '失败 (尝试' in msg:
                # Session creation failed.
                failure_count += 1
                events.append({
                    "time": retry["time"],
                    "type": "retry",
                    "content": f"服务异常,正在重试({failure_count})"
                })
            elif '选择账户' in msg:
                # Account selected / switched.
                account_select_count += 1
                # If the very next event is a "switch", skip this "select"
                # to avoid showing the same transition twice.
                next_is_switch = (i + 1 < len(retry_events) and '切换账户' in retry_events[i + 1]["message"])
                if not next_is_switch:
                    if account_select_count == 1:
                        # First selection: shown as "select service node".
                        events.append({
                            "time": retry["time"],
                            "type": "select",
                            "content": "选择服务节点"
                        })
                    else:
                        # Subsequent selections: shown as "switch service node".
                        events.append({
                            "time": retry["time"],
                            "type": "switch",
                            "content": "切换服务节点"
                        })
            elif '切换账户' in msg:
                # Runtime account switch (shown as "switch service node").
                events.append({
                    "time": retry["time"],
                    "type": "switch",
                    "content": "切换服务节点"
                })
        # 3. Completion event.
        if final_status == "success":
            if duration:
                events.append({
                    "time": req_logs[-1]["time"],
                    "type": "complete",
                    "status": "success",
                    "content": f"响应完成 | 耗时{duration}"
                })
            else:
                events.append({
                    "time": req_logs[-1]["time"],
                    "type": "complete",
                    "status": "success",
                    "content": "响应完成"
                })
        elif final_status == "error":
            events.append({
                "time": req_logs[-1]["time"],
                "type": "complete",
                "status": "error",
                "content": "请求失败"
            })
        elif final_status == "timeout":
            events.append({
                "time": req_logs[-1]["time"],
                "type": "complete",
                "status": "timeout",
                "content": "请求超时"
            })
        sanitized.append({
            "request_id": request_id,
            "start_time": start_time,
            "status": final_status,
            "events": events
        })
    # Newest first, capped at `limit`.
    sanitized.sort(key=lambda x: x["start_time"], reverse=True)
    return sanitized[:limit]
class Message(BaseModel):
    """One chat message in the OpenAI request format."""
    role: str  # e.g. "system" / "user" / "assistant"
    content: Union[str, List[Dict[str, Any]]]  # plain text or multi-part content blocks
class ChatRequest(BaseModel):
    """OpenAI-compatible /v1/chat/completions request body."""
    model: str = "gemini-auto"
    messages: List[Message]
    stream: bool = False
    # temperature/top_p accepted for API compatibility; whether they are
    # forwarded upstream is not visible in this chunk — TODO confirm.
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
def create_chunk(id: str, created: int, model: str, delta: dict, finish_reason: Union[str, None]) -> str:
    """Serialize one OpenAI-style ``chat.completion.chunk`` as a JSON string.

    `delta` is the incremental payload for choice 0; `finish_reason` is None
    until the final chunk.
    """
    choice = {
        "index": 0,
        "delta": delta,
        "logprobs": None,  # OpenAI standard field
        "finish_reason": finish_reason
    }
    payload = {
        "id": id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [choice],
        "system_fingerprint": None  # OpenAI standard field (optional)
    }
    return json.dumps(payload)
# ---------- Helpers ----------
def get_admin_template_data(request: Request):
    """Assemble the admin-page template context (shared by several routes)."""
    settings = dict(
        api_key=API_KEY,
        base_url=BASE_URL,
        proxy=PROXY,
        logo_url=LOGO_URL,
        chat_url=CHAT_URL,
        path_prefix=PATH_PREFIX,
        max_new_session_tries=MAX_NEW_SESSION_TRIES,
        max_request_retries=MAX_REQUEST_RETRIES,
        max_account_switch_tries=MAX_ACCOUNT_SWITCH_TRIES,
        account_failure_threshold=ACCOUNT_FAILURE_THRESHOLD,
        rate_limit_cooldown_seconds=RATE_LIMIT_COOLDOWN_SECONDS,
        session_cache_ttl_seconds=SESSION_CACHE_TTL_SECONDS,
    )
    return prepare_admin_template_data(
        request, multi_account_mgr, log_buffer, log_lock, **settings
    )
# ---------- Routes ----------
@app.get("/")
async def home(request: Request):
    """Home page — behavior depends on whether PATH_PREFIX is configured."""
    # Hidden mode: with a PATH_PREFIX set, the bare root reveals nothing.
    if PATH_PREFIX:
        raise HTTPException(404, "Not Found")
    # Public mode: route by login state.
    if not is_logged_in(request):
        return RedirectResponse(url="/login", status_code=302)
    template_data = get_admin_template_data(request)
    return templates.TemplateResponse("admin/index.html", template_data)
# ---------- Login/logout endpoints (optional PATH_PREFIX support) ----------
# Unprefixed login endpoint.
@app.get("/login")
async def admin_login_get(request: Request, error: str = None):
    """Render the login page."""
    context = {"request": request, "error": error}
    return templates.TemplateResponse("auth/login.html", context)
@app.post("/login")
async def admin_login_post(request: Request, admin_key: str = Form(...)):
    """Handle the login form submission."""
    # Guard clause: reject a wrong key immediately.
    if admin_key != ADMIN_KEY:
        logger.warning("[AUTH] 登录失败 - 密钥错误")
        return templates.TemplateResponse("auth/login.html", {"request": request, "error": "密钥错误,请重试"})
    login_user(request)
    logger.info("[AUTH] 管理员登录成功")
    return RedirectResponse(url="/", status_code=302)
@app.post("/logout")
@require_login(redirect_to_login=False)
async def admin_logout(request: Request):
    """Log the administrator out and redirect to the login page."""
    logout_user(request)
    logger.info("[AUTH] 管理员已登出")
    login_page = "/login"
    return RedirectResponse(url=login_page, status_code=302)
# Prefixed login/logout endpoints (registered only when PATH_PREFIX is configured).
if PATH_PREFIX:
    @app.get(f"/{PATH_PREFIX}/login")
    async def admin_login_get_prefixed(request: Request, error: str = None):
        """Login page (prefixed)."""
        return templates.TemplateResponse("auth/login.html", {"request": request, "error": error})
    @app.post(f"/{PATH_PREFIX}/login")
    async def admin_login_post_prefixed(request: Request, admin_key: str = Form(...)):
        """Handle the login form submission (prefixed)."""
        if admin_key == ADMIN_KEY:
            login_user(request)
            logger.info(f"[AUTH] 管理员登录成功")
            return RedirectResponse(url=f"/{PATH_PREFIX}", status_code=302)
        else:
            logger.warning(f"[AUTH] 登录失败 - 密钥错误")
            return templates.TemplateResponse("auth/login.html", {"request": request, "error": "密钥错误,请重试"})
    @app.post(f"/{PATH_PREFIX}/logout")
    @require_login(redirect_to_login=False)
    async def admin_logout_prefixed(request: Request):
        """Logout (prefixed)."""
        logout_user(request)
        logger.info(f"[AUTH] 管理员已登出")
        return RedirectResponse(url=f"/{PATH_PREFIX}/login", status_code=302)
# ---------- Admin endpoints (login required) ----------
# Unprefixed admin home.
@app.get("/admin")
@require_login()
async def admin_home_no_prefix(request: Request):
    """Admin home page."""
    return templates.TemplateResponse("admin/index.html", get_admin_template_data(request))
# Prefixed admin home (registered only when PATH_PREFIX is configured).
if PATH_PREFIX:
    @app.get(f"/{PATH_PREFIX}")
    @require_login()
    async def admin_home_prefixed(request: Request):
        """Admin home page (prefixed) — delegates to the unprefixed handler."""
        return await admin_home_no_prefix(request=request)
# ---------- Admin API endpoints (login required) ----------
@app.get("/admin/health")
@require_login()
async def admin_health(request: Request):
    """Liveness probe for the admin UI: status plus the current UTC time.

    Uses a timezone-aware now() because datetime.utcnow() is deprecated
    (Python 3.12+); tzinfo is stripped so the payload format is unchanged.
    """
    now_utc = datetime.now(timezone.utc).replace(tzinfo=None)
    return {"status": "ok", "time": now_utc.isoformat()}
@app.get("/admin/accounts")
@require_login()
async def admin_get_accounts(request: Request):
    """Return status information for every configured account."""
    accounts_info = []
    for account_id, account_manager in multi_account_mgr.accounts.items():
        # Renamed from `config` locally so the imported `config` isn't shadowed.
        acct_cfg = account_manager.config
        remaining_hours = acct_cfg.get_remaining_hours()
        status, status_color, remaining_display = format_account_expiration(remaining_hours)
        cooldown_seconds, cooldown_reason = account_manager.get_cooldown_info()
        entry = {
            "id": acct_cfg.account_id,
            "status": status,
            "expires_at": acct_cfg.expires_at or "未设置",
            "remaining_hours": remaining_hours,
            "remaining_display": remaining_display,
            "is_available": account_manager.is_available,
            "error_count": account_manager.error_count,
            "disabled": acct_cfg.disabled,
            "cooldown_seconds": cooldown_seconds,
            "cooldown_reason": cooldown_reason,
            "conversation_count": account_manager.conversation_count,
        }
        accounts_info.append(entry)
    return {"total": len(accounts_info), "accounts": accounts_info}
@app.get("/admin/accounts-config")
@require_login()
async def admin_get_config(request: Request):
    """Return the full raw account configuration."""
    try:
        return {"accounts": load_accounts_from_source()}
    except Exception as exc:
        logger.error("[CONFIG] 获取配置失败: %s", str(exc))
        raise HTTPException(500, f"获取失败: {str(exc)}")
@app.put("/admin/accounts-config")
@require_login()
async def admin_update_config(request: Request, accounts_data: list = Body(...)):
    """Replace the whole account configuration and rebuild the manager."""
    global multi_account_mgr
    try:
        refreshed = _update_accounts_config(
            accounts_data, multi_account_mgr, http_client, USER_AGENT,
            ACCOUNT_FAILURE_THRESHOLD, RATE_LIMIT_COOLDOWN_SECONDS,
            SESSION_CACHE_TTL_SECONDS, global_stats
        )
        multi_account_mgr = refreshed
        return {
            "status": "success",
            "message": "配置已更新",
            "account_count": len(multi_account_mgr.accounts),
        }
    except Exception as exc:
        logger.error("[CONFIG] 更新配置失败: %s", str(exc))
        raise HTTPException(500, f"更新失败: {str(exc)}")
@app.delete("/admin/accounts/{account_id}")
@require_login()
async def admin_delete_account(request: Request, account_id: str):
    """Delete a single account and rebuild the manager."""
    global multi_account_mgr
    try:
        refreshed = _delete_account(
            account_id, multi_account_mgr, http_client, USER_AGENT,
            ACCOUNT_FAILURE_THRESHOLD, RATE_LIMIT_COOLDOWN_SECONDS,
            SESSION_CACHE_TTL_SECONDS, global_stats
        )
        multi_account_mgr = refreshed
        return {
            "status": "success",
            "message": f"账户 {account_id} 已删除",
            "account_count": len(multi_account_mgr.accounts),
        }
    except Exception as exc:
        logger.error("[CONFIG] 删除账户失败: %s", str(exc))
        raise HTTPException(500, f"删除失败: {str(exc)}")
@app.put("/admin/accounts/{account_id}/disable")
@require_login()
async def admin_disable_account(request: Request, account_id: str):
    """Manually disable an account."""
    global multi_account_mgr
    try:
        refreshed = _update_account_disabled_status(
            account_id, True, multi_account_mgr, http_client, USER_AGENT,
            ACCOUNT_FAILURE_THRESHOLD, RATE_LIMIT_COOLDOWN_SECONDS,
            SESSION_CACHE_TTL_SECONDS, global_stats
        )
        multi_account_mgr = refreshed
        return {
            "status": "success",
            "message": f"账户 {account_id} 已禁用",
            "account_count": len(multi_account_mgr.accounts),
        }
    except Exception as exc:
        logger.error("[CONFIG] 禁用账户失败: %s", str(exc))
        raise HTTPException(500, f"禁用失败: {str(exc)}")
@app.put("/admin/accounts/{account_id}/enable")
@require_login()
async def admin_enable_account(request: Request, account_id: str):
    """Enable an account and reset its runtime error state."""
    global multi_account_mgr
    try:
        multi_account_mgr = _update_account_disabled_status(
            account_id, False, multi_account_mgr, http_client, USER_AGENT,
            ACCOUNT_FAILURE_THRESHOLD, RATE_LIMIT_COOLDOWN_SECONDS,
            SESSION_CACHE_TTL_SECONDS, global_stats
        )
        # Clear runtime error state so error-disabled accounts recover manually.
        runtime_mgr = multi_account_mgr.accounts.get(account_id)
        if runtime_mgr is not None:
            runtime_mgr.is_available = True
            runtime_mgr.error_count = 0
            runtime_mgr.last_429_time = 0.0
            logger.info(f"[CONFIG] 账户 {account_id} 错误状态已重置")
        return {
            "status": "success",
            "message": f"账户 {account_id} 已启用",
            "account_count": len(multi_account_mgr.accounts),
        }
    except Exception as exc:
        logger.error("[CONFIG] 启用账户失败: %s", str(exc))
        raise HTTPException(500, f"启用失败: {str(exc)}")
# ---------- System settings API ----------
@app.get("/admin/settings")
@require_login()
async def admin_get_settings(request: Request):
    """Return the current system settings as a plain dict."""
    basic = {
        "api_key": config.basic.api_key,
        "base_url": config.basic.base_url,
        "proxy": config.basic.proxy,
    }
    image_generation = {
        "enabled": config.image_generation.enabled,
        "supported_models": config.image_generation.supported_models,
    }
    retry = {
        "max_new_session_tries": config.retry.max_new_session_tries,
        "max_request_retries": config.retry.max_request_retries,
        "max_account_switch_tries": config.retry.max_account_switch_tries,
        "account_failure_threshold": config.retry.account_failure_threshold,
        "rate_limit_cooldown_seconds": config.retry.rate_limit_cooldown_seconds,
        "session_cache_ttl_seconds": config.retry.session_cache_ttl_seconds,
    }
    public_display = {
        "logo_url": config.public_display.logo_url,
        "chat_url": config.public_display.chat_url,
    }
    session = {"expire_hours": config.session.expire_hours}
    return {
        "basic": basic,
        "image_generation": image_generation,
        "retry": retry,
        "public_display": public_display,
        "session": session,
    }
@app.put("/admin/settings")
@require_login()
async def admin_update_settings(request: Request, new_settings: dict = Body(...)):
    """Update system settings: persist to YAML, hot-reload, apply to globals.

    Order matters: old values are snapshotted first so that, after the
    reload, proxy and retry-policy changes can be detected and propagated
    to the shared HTTP client and the per-account managers.
    """
    global API_KEY, PROXY, BASE_URL, LOGO_URL, CHAT_URL
    global IMAGE_GENERATION_ENABLED, IMAGE_GENERATION_MODELS
    global MAX_NEW_SESSION_TRIES, MAX_REQUEST_RETRIES, MAX_ACCOUNT_SWITCH_TRIES
    global ACCOUNT_FAILURE_THRESHOLD, RATE_LIMIT_COOLDOWN_SECONDS, SESSION_CACHE_TTL_SECONDS
    global SESSION_EXPIRE_HOURS, multi_account_mgr, http_client
    try:
        # Snapshot old values for change detection below.
        old_proxy = PROXY
        old_retry_config = {
            "account_failure_threshold": ACCOUNT_FAILURE_THRESHOLD,
            "rate_limit_cooldown_seconds": RATE_LIMIT_COOLDOWN_SECONDS,
            "session_cache_ttl_seconds": SESSION_CACHE_TTL_SECONDS
        }
        # Persist to YAML…
        config_manager.save_yaml(new_settings)
        # …then hot-reload the config object.
        config_manager.reload()
        # Mirror the reloaded config into module-level globals (takes effect immediately).
        API_KEY = config.basic.api_key
        PROXY = config.basic.proxy
        BASE_URL = config.basic.base_url
        LOGO_URL = config.public_display.logo_url
        CHAT_URL = config.public_display.chat_url
        IMAGE_GENERATION_ENABLED = config.image_generation.enabled
        IMAGE_GENERATION_MODELS = config.image_generation.supported_models
        MAX_NEW_SESSION_TRIES = config.retry.max_new_session_tries
        MAX_REQUEST_RETRIES = config.retry.max_request_retries
        MAX_ACCOUNT_SWITCH_TRIES = config.retry.max_account_switch_tries
        ACCOUNT_FAILURE_THRESHOLD = config.retry.account_failure_threshold
        RATE_LIMIT_COOLDOWN_SECONDS = config.retry.rate_limit_cooldown_seconds
        SESSION_CACHE_TTL_SECONDS = config.retry.session_cache_ttl_seconds
        SESSION_EXPIRE_HOURS = config.session.expire_hours
        # Rebuild the shared HTTP client when the proxy changed.
        if old_proxy != PROXY:
            logger.info(f"[CONFIG] 代理配置已变化,重建 HTTP 客户端")
            await http_client.aclose()  # close the old client first
            http_client = httpx.AsyncClient(
                proxy=PROXY or None,
                verify=False,
                http2=False,
                timeout=httpx.Timeout(TIMEOUT_SECONDS, connect=60.0),
                limits=httpx.Limits(
                    max_keepalive_connections=100,
                    max_connections=200
                )
            )
            # Point every account's http_client reference at the new client.
            multi_account_mgr.update_http_client(http_client)
        # Propagate retry-policy changes to the account managers.
        retry_changed = (
            old_retry_config["account_failure_threshold"] != ACCOUNT_FAILURE_THRESHOLD or
            old_retry_config["rate_limit_cooldown_seconds"] != RATE_LIMIT_COOLDOWN_SECONDS or
            old_retry_config["session_cache_ttl_seconds"] != SESSION_CACHE_TTL_SECONDS
        )
        if retry_changed:
            logger.info(f"[CONFIG] 重试策略已变化,更新账户管理器配置")
            # Update the pool-level TTL and every per-account threshold.
            multi_account_mgr.cache_ttl = SESSION_CACHE_TTL_SECONDS
            for account_id, account_mgr in multi_account_mgr.accounts.items():
                account_mgr.account_failure_threshold = ACCOUNT_FAILURE_THRESHOLD
                account_mgr.rate_limit_cooldown_seconds = RATE_LIMIT_COOLDOWN_SECONDS
        logger.info(f"[CONFIG] 系统设置已更新并实时生效")
        return {"status": "success", "message": "设置已保存并实时生效!"}
    except Exception as e:
        logger.error(f"[CONFIG] 更新设置失败: {str(e)}")
        raise HTTPException(500, f"更新失败: {str(e)}")
@app.get("/admin/log")
@require_login()
async def admin_get_logs(
    request: Request,
    limit: int = 1500,
    level: str = None,
    search: str = None,
    start_time: str = None,
    end_time: str = None
):
    """Return in-memory logs with optional filtering plus summary stats.

    Args:
        limit: max number of (filtered) entries to return, clamped to 1..3000.
        level: optional level-name filter (case-insensitive).
        search: optional case-insensitive substring filter on the message.
        start_time / end_time: optional inclusive bounds, compared as strings
            against each entry's "time" field.
    """
    # Snapshot the buffer under the lock; everything below works on the copy.
    with log_lock:
        logs = list(log_buffer)
    # Aggregate stats over the full (unfiltered) snapshot.
    stats_by_level = {}
    error_logs = []
    chat_count = 0
    for log in logs:
        level_name = log.get("level", "INFO")
        stats_by_level[level_name] = stats_by_level.get(level_name, 0) + 1
        if level_name in ["ERROR", "CRITICAL"]:
            error_logs.append(log)
        if "收到请求" in log.get("message", ""):
            chat_count += 1
    # Apply filters. Use .get() throughout (matching the stats loop above) so
    # a malformed entry can never raise KeyError.
    if level:
        level = level.upper()
        logs = [log for log in logs if log.get("level", "INFO") == level]
    if search:
        logs = [log for log in logs if search.lower() in log.get("message", "").lower()]
    if start_time:
        logs = [log for log in logs if log.get("time", "") >= start_time]
    if end_time:
        logs = [log for log in logs if log.get("time", "") <= end_time]
    # Clamp so a non-positive limit cannot turn into a bogus negative slice.
    limit = max(1, min(limit, 3000))
    filtered_logs = logs[-limit:]
    return {
        "total": len(filtered_logs),
        "limit": limit,
        "filters": {"level": level, "search": search, "start_time": start_time, "end_time": end_time},
        "logs": filtered_logs,
        "stats": {
            "memory": {"total": len(log_buffer), "by_level": stats_by_level, "capacity": log_buffer.maxlen},
            "errors": {"count": len(error_logs), "recent": error_logs[-10:]},
            "chat_count": chat_count
        }
    }
@app.delete("/admin/log")
@require_login()
async def admin_clear_logs(request: Request, confirm: str = None):
    """Clear the in-memory log buffer; requires explicit ?confirm=yes."""
    if confirm != "yes":
        raise HTTPException(400, "需要 confirm=yes 参数确认清空操作")
    with log_lock:
        cleared_count = len(log_buffer)
        log_buffer.clear()
    # Log after releasing the lock (the handler may append to the buffer).
    logger.info("[LOG] 日志已清空")
    return {"status": "success", "message": "已清空内存日志", "cleared_count": cleared_count}
@app.get("/admin/log/html")
@require_login()
async def admin_logs_html_route(request: Request):
    """Serve the styled HTML log-viewer page."""
    context = {"request": request}
    return templates.TemplateResponse("admin/logs.html", context)
# PATH_PREFIX-scoped admin endpoints (registered only when PATH_PREFIX is set).
# Each handler is a thin delegate to the unprefixed endpoint of the same name,
# so the logic lives in one place and @require_login guards both routes.
if PATH_PREFIX:
    @app.get(f"/{PATH_PREFIX}/health")
    @require_login()
    async def admin_health_prefixed(request: Request):
        return await admin_health(request=request)

    @app.get(f"/{PATH_PREFIX}/accounts")
    @require_login()
    async def admin_get_accounts_prefixed(request: Request):
        return await admin_get_accounts(request=request)

    @app.get(f"/{PATH_PREFIX}/accounts-config")
    @require_login()
    async def admin_get_config_prefixed(request: Request):
        return await admin_get_config(request=request)

    @app.put(f"/{PATH_PREFIX}/accounts-config")
    @require_login()
    async def admin_update_config_prefixed(request: Request, accounts_data: list = Body(...)):
        return await admin_update_config(request=request, accounts_data=accounts_data)

    # Note the doubled braces: {{account_id}} stays a literal path parameter
    # inside the f-string used for the route template.
    @app.delete(f"/{PATH_PREFIX}/accounts/{{account_id}}")
    @require_login()
    async def admin_delete_account_prefixed(request: Request, account_id: str):
        return await admin_delete_account(request=request, account_id=account_id)

    @app.put(f"/{PATH_PREFIX}/accounts/{{account_id}}/disable")
    @require_login()
    async def admin_disable_account_prefixed(request: Request, account_id: str):
        return await admin_disable_account(request=request, account_id=account_id)

    @app.put(f"/{PATH_PREFIX}/accounts/{{account_id}}/enable")
    @require_login()
    async def admin_enable_account_prefixed(request: Request, account_id: str):
        return await admin_enable_account(request=request, account_id=account_id)

    @app.get(f"/{PATH_PREFIX}/log")
    @require_login()
    async def admin_get_logs_prefixed(
        request: Request,
        limit: int = 1500,
        level: str = None,
        search: str = None,
        start_time: str = None,
        end_time: str = None
    ):
        return await admin_get_logs(request=request, limit=limit, level=level, search=search, start_time=start_time, end_time=end_time)

    @app.delete(f"/{PATH_PREFIX}/log")
    @require_login()
    async def admin_clear_logs_prefixed(request: Request, confirm: str = None):
        return await admin_clear_logs(request=request, confirm=confirm)

    @app.get(f"/{PATH_PREFIX}/log/html")
    @require_login()
    async def admin_logs_html_route_prefixed(request: Request):
        return await admin_logs_html_route(request=request)

    @app.get(f"/{PATH_PREFIX}/settings")
    @require_login()
    async def admin_get_settings_prefixed(request: Request):
        return await admin_get_settings(request=request)

    @app.put(f"/{PATH_PREFIX}/settings")
    @require_login()
    async def admin_update_settings_prefixed(request: Request, new_settings: dict = Body(...)):
        return await admin_update_settings(request=request, new_settings=new_settings)
# ---------- API endpoints (API-key auth) ----------
@app.get("/v1/models")
async def list_models(authorization: str = Header(None)):
    """List every model id from MODEL_MAPPING in OpenAI-compatible shape."""
    verify_api_key(API_KEY, authorization)
    created = int(time.time())
    models = [
        {"id": name, "object": "model", "created": created, "owned_by": "google", "permission": []}
        for name in MODEL_MAPPING.keys()
    ]
    return {"object": "list", "data": models}
@app.get("/v1/models/{model_id}")
async def get_model(model_id: str, authorization: str = Header(None)):
    """Return a minimal model object for the given id (no existence check)."""
    verify_api_key(API_KEY, authorization)
    return {"id": model_id, "object": "model"}
# PATH_PREFIX-scoped API endpoints (registered only when PATH_PREFIX is set).
if PATH_PREFIX:
    @app.get(f"/{PATH_PREFIX}/v1/models")
    async def list_models_prefixed(authorization: str = Header(None)):
        # Thin delegate; the API-key check happens in the unprefixed handler.
        return await list_models(authorization)

    @app.get(f"/{PATH_PREFIX}/v1/models/{{model_id}}")
    async def get_model_prefixed(model_id: str, authorization: str = Header(None)):
        return await get_model(model_id, authorization)
# ---------- Chat API endpoint ----------
@app.post("/v1/chat/completions")
async def chat(
    req: ChatRequest,
    request: Request,
    authorization: Optional[str] = Header(None)
):
    """OpenAI-compatible chat completions endpoint."""
    # API-key check happens here so prefixed and unprefixed routes share it.
    verify_api_key(API_KEY, authorization)
    # Delegate to the shared implementation (chat_impl below).
    return await chat_impl(req, request, authorization)
if PATH_PREFIX:
    @app.post(f"/{PATH_PREFIX}/v1/chat/completions")
    async def chat_prefixed(
        req: ChatRequest,
        request: Request,
        authorization: Optional[str] = Header(None)
    ):
        # Prefixed alias; auth and all logic live in the unprefixed handler.
        return await chat(req, request, authorization)
# Chat implementation shared by the prefixed and unprefixed routes.
async def chat_impl(
    req: ChatRequest,
    request: Request,
    authorization: Optional[str]
):
    """Core chat handler.

    Binds each conversation (fingerprint of messages + client IP) to one
    upstream account/session, streams the answer as SSE (or collects it for
    non-stream callers), and fails over to other accounts — replaying the
    full context — when the bound account errors out.
    """
    # Request id first: it tags every log line produced for this request.
    request_id = str(uuid.uuid4())[:6]
    # Client IP (used for per-client session isolation), preferring the
    # first entry of X-Forwarded-For when running behind a proxy.
    client_ip = request.headers.get("x-forwarded-for")
    if client_ip:
        client_ip = client_ip.split(",")[0].strip()
    else:
        client_ip = request.client.host if request.client else "unknown"
    # Record request statistics.
    async with stats_lock:
        global_stats["total_requests"] += 1
        global_stats["request_timestamps"].append(time.time())
        await save_stats(global_stats)
    # Model validation.
    if req.model not in MODEL_MAPPING:
        logger.error(f"[CHAT] [req_{request_id}] 不支持的模型: {req.model}")
        raise HTTPException(
            status_code=404,
            detail=f"Model '{req.model}' not found. Available models: {list(MODEL_MAPPING.keys())}"
        )
    # Stash the model on request.state (consumed by uptime tracking).
    request.state.model = req.model
    # Conversation fingerprint + per-conversation lock: serializes concurrent
    # requests that belong to the same dialogue.
    conv_key = get_conversation_key([m.model_dump() for m in req.messages], client_ip)
    session_lock = await multi_account_mgr.acquire_session_lock(conv_key)
    # Under the lock: check the cache and create/bind a session if needed.
    async with session_lock:
        cached_session = multi_account_mgr.global_session_cache.get(conv_key)
        if cached_session:
            # Reuse the account already bound to this conversation.
            account_id = cached_session["account_id"]
            account_manager = await multi_account_mgr.get_account(account_id, request_id)
            google_session = cached_session["session_id"]
            is_new_conversation = False
            logger.info(f"[CHAT] [{account_id}] [req_{request_id}] 继续会话: {google_session[-12:]}")
        else:
            # New conversation: round-robin over available accounts, moving on
            # to the next one whenever session creation fails.
            max_account_tries = min(MAX_NEW_SESSION_TRIES, len(multi_account_mgr.accounts))
            last_error = None
            for attempt in range(max_account_tries):
                try:
                    account_manager = await multi_account_mgr.get_account(None, request_id)
                    google_session = await create_google_session(account_manager, http_client, USER_AGENT, request_id)
                    # Bind the account to this conversation (thread-safe).
                    await multi_account_mgr.set_session_cache(
                        conv_key,
                        account_manager.config.account_id,
                        google_session
                    )
                    is_new_conversation = True
                    logger.info(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 新会话创建并绑定账户")
                    # Account-pool health: success.
                    uptime_tracker.record_request("account_pool", True)
                    break
                except Exception as e:
                    last_error = e
                    error_type = type(e).__name__
                    # account_manager may be unbound if get_account itself failed.
                    account_id = account_manager.config.account_id if 'account_manager' in locals() and account_manager else 'unknown'
                    logger.error(f"[CHAT] [req_{request_id}] 账户 {account_id} 创建会话失败 (尝试 {attempt + 1}/{max_account_tries}) - {error_type}: {str(e)}")
                    # Account-pool health: this account failed.
                    uptime_tracker.record_request("account_pool", False)
                    if attempt == max_account_tries - 1:
                        logger.error(f"[CHAT] [req_{request_id}] 所有账户均不可用")
                        raise HTTPException(503, f"All accounts unavailable: {str(last_error)[:100]}")
                    # Otherwise fall through to the next account.
    # Build a short preview of the latest user message for the logs.
    if req.messages:
        last_content = req.messages[-1].content
        if isinstance(last_content, str):
            # Show the full message, capped at 500 characters.
            if len(last_content) > 500:
                preview = last_content[:500] + "...(已截断)"
            else:
                preview = last_content
        else:
            preview = f"[多模态: {len(last_content)}部分]"
    else:
        preview = "[空消息]"
    # Log the request metadata…
    logger.info(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 收到请求: {req.model} | {len(req.messages)}条消息 | stream={req.stream}")
    # …and the user-message preview on its own line for easy scanning.
    logger.info(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 用户消息: {preview}")
    # Parse the latest message into text + images.
    last_text, current_images = await parse_last_message(req.messages, http_client, request_id)
    # Decide what text to send (both branches send only the latest message;
    # is_retry_mode=True on a new conversation later triggers a full-context send).
    if is_new_conversation:
        # New conversation: send only the last message.
        text_to_send = last_text
        is_retry_mode = True
    else:
        # Continuing conversation: also only the current message.
        text_to_send = last_text
        is_retry_mode = False
    # Touch the session timestamp (thread-safe).
    await multi_account_mgr.update_session_time(conv_key)
    chat_id = f"chatcmpl-{uuid.uuid4()}"
    created_time = int(time.time())
    # Generator wrapper: handles image upload, retries and account failover.
    async def response_wrapper():
        nonlocal account_manager  # failover rebinds the outer account_manager
        retry_count = 0
        max_retries = MAX_REQUEST_RETRIES  # configured retry budget
        current_text = text_to_send
        current_retry_mode = is_retry_mode
        # Uploaded file ids. A fileId is bound to a session, so every session
        # change forces a re-upload.
        current_file_ids = []
        # Accounts that already failed for this request (never reused).
        failed_accounts = set()
        # Up to max_retries+1 attempts (initial try + retries).
        while retry_count <= max_retries:
            try:
                # Use .get(): the cache entry may have been evicted meanwhile.
                cached = multi_account_mgr.global_session_cache.get(conv_key)
                if not cached:
                    logger.warning(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 缓存已清理,重建Session")
                    new_sess = await create_google_session(account_manager, http_client, USER_AGENT, request_id)
                    await multi_account_mgr.set_session_cache(
                        conv_key,
                        account_manager.config.account_id,
                        new_sess
                    )
                    current_session = new_sess
                    current_retry_mode = True
                    current_file_ids = []
                else:
                    current_session = cached["session_id"]
                # A. Upload images to the current session if not done yet
                #    (a fresh session always needs a fresh upload).
                if current_images and not current_file_ids:
                    for img in current_images:
                        fid = await upload_context_file(current_session, img["mime"], img["data"], account_manager, http_client, USER_AGENT, request_id)
                        current_file_ids.append(fid)
                # B. In retry mode, resend the full conversation context.
                if current_retry_mode:
                    current_text = build_full_context_text(req.messages)
                # C. Stream the answer.
                async for chunk in stream_chat_generator(
                    current_session,
                    current_text,
                    current_file_ids,
                    req.model,
                    chat_id,
                    created_time,
                    account_manager,
                    req.stream,
                    request_id,
                    request
                ):
                    yield chunk
                # Success: reset the account's failure counters.
                account_manager.is_available = True
                account_manager.error_count = 0
                account_manager.conversation_count += 1  # one more conversation served
                # Account-pool health: success.
                uptime_tracker.record_request("account_pool", True)
                # Persist the per-account conversation counter.
                async with stats_lock:
                    if "account_conversations" not in global_stats:
                        global_stats["account_conversations"] = {}
                    global_stats["account_conversations"][account_manager.config.account_id] = account_manager.conversation_count
                    await save_stats(global_stats)
                break
            except (httpx.HTTPError, ssl.SSLError, HTTPException) as e:
                # Remember this account as failed for this request.
                failed_accounts.add(account_manager.config.account_id)
                # Account-pool health: failure.
                uptime_tracker.record_request("account_pool", False)
                # 429 (rate limit) gets dedicated cooldown handling.
                is_rate_limit = isinstance(e, HTTPException) and e.status_code == 429
                # Bump failure counters (feeds the circuit breaker).
                account_manager.last_error_time = time.time()
                if is_rate_limit:
                    account_manager.last_429_time = time.time()
                account_manager.error_count += 1
                if account_manager.error_count >= ACCOUNT_FAILURE_THRESHOLD:
                    account_manager.is_available = False
                    if is_rate_limit:
                        logger.error(f"[ACCOUNT] [{account_manager.config.account_id}] [req_{request_id}] 遇到429错误{account_manager.error_count}次,账户已禁用(需休息{RATE_LIMIT_COOLDOWN_SECONDS}秒)")
                    else:
                        logger.error(f"[ACCOUNT] [{account_manager.config.account_id}] [req_{request_id}] 请求连续失败{account_manager.error_count}次,账户已永久禁用")
                retry_count += 1
                # Log the error with as much detail as we have.
                error_type = type(e).__name__
                error_detail = str(e)
                # HTTPException carries a status code + detail worth surfacing.
                if isinstance(e, HTTPException):
                    if is_rate_limit:
                        logger.error(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 遇到429限流错误,账户将休息{RATE_LIMIT_COOLDOWN_SECONDS}秒")
                    else:
                        logger.error(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] HTTP错误 {e.status_code}: {e.detail}")
                else:
                    logger.error(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] {error_type}: {error_detail}")
                # Any retry budget left?
                if retry_count <= max_retries:
                    logger.warning(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 正在重试 ({retry_count}/{max_retries})")
                    # Fail over to a different account (full context is replayed).
                    try:
                        # Pick a new account, skipping ones that already failed.
                        max_account_tries = MAX_ACCOUNT_SWITCH_TRIES  # configured switch attempts
                        new_account = None
                        for _ in range(max_account_tries):
                            candidate = await multi_account_mgr.get_account(None, request_id)
                            if candidate.config.account_id not in failed_accounts:
                                new_account = candidate
                                break
                        if not new_account:
                            logger.error(f"[CHAT] [req_{request_id}] 所有账户均已失败,无可用账户")
                            if req.stream: yield f"data: {json.dumps({'error': {'message': 'All Accounts Failed'}})}\n\n"
                            return
                        logger.info(f"[CHAT] [req_{request_id}] 切换账户: {account_manager.config.account_id} -> {new_account.config.account_id}")
                        # Fresh session on the new account.
                        new_sess = await create_google_session(new_account, http_client, USER_AGENT, request_id)
                        # Rebind the conversation cache to the new account.
                        await multi_account_mgr.set_session_cache(
                            conv_key,
                            new_account.config.account_id,
                            new_sess
                        )
                        # Switch the active account manager.
                        account_manager = new_account
                        # Force full-context resend on the next attempt…
                        current_retry_mode = True
                        current_file_ids = []  # …and re-upload files to the new session
                    except Exception as create_err:
                        error_type = type(create_err).__name__
                        logger.error(f"[CHAT] [req_{request_id}] 账户切换失败 ({error_type}): {str(create_err)}")
                        # Account-pool health: failover failed.
                        uptime_tracker.record_request("account_pool", False)
                        if req.stream: yield f"data: {json.dumps({'error': {'message': 'Account Failover Failed'}})}\n\n"
                        return
                else:
                    # Retry budget exhausted.
                    logger.error(f"[CHAT] [req_{request_id}] 已达到最大重试次数 ({max_retries}),请求失败")
                    if req.stream: yield f"data: {json.dumps({'error': {'message': f'Max retries ({max_retries}) exceeded: {e}'}})}\n\n"
                    return
    if req.stream:
        return StreamingResponse(response_wrapper(), media_type="text/event-stream")
    # Non-streaming path: drain the SSE generator and assemble one response.
    full_content = ""
    full_reasoning = ""
    async for chunk_str in response_wrapper():
        if chunk_str.startswith("data: [DONE]"): break
        if chunk_str.startswith("data: "):
            try:
                data = json.loads(chunk_str[6:])
                delta = data["choices"][0]["delta"]
                if "content" in delta:
                    full_content += delta["content"]
                if "reasoning_content" in delta:
                    full_reasoning += delta["reasoning_content"]
            except json.JSONDecodeError as e:
                logger.error(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] JSON解析失败: {str(e)}")
            except (KeyError, IndexError) as e:
                logger.error(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 响应格式错误 ({type(e).__name__}): {str(e)}")
    # Assemble the assistant message.
    message = {"role": "assistant", "content": full_content}
    if full_reasoning:
        message["reasoning_content"] = full_reasoning
    # Completion log for the non-streaming path.
    logger.info(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] 非流式响应完成")
    # Response preview for the logs (capped at 500 characters).
    response_preview = full_content[:500] + "...(已截断)" if len(full_content) > 500 else full_content
    logger.info(f"[CHAT] [{account_manager.config.account_id}] [req_{request_id}] AI响应: {response_preview}")
    return {
        "id": chat_id,
        "object": "chat.completion",
        "created": created_time,
        "model": req.model,
        "choices": [{"index": 0, "message": message, "finish_reason": "stop"}],
        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    }
# ---------- Image-generation helpers ----------
def parse_images_from_response(data_list: list) -> tuple[list, str]:
    """Extract generated-image file references from a streamAssist response.

    Args:
        data_list: parsed JSON objects collected from the response stream.

    Returns:
        (file_ids, session_name) where file_ids is a list of
        {"fileId": str, "mimeType": str} dicts and session_name is the
        most recent session reported by the stream ("" if none seen).
    """
    file_ids = []
    session_name = ""
    for data in data_list:
        sar = data.get("streamAssistResponse")
        if not sar:
            continue
        # Track the session; the latest one seen wins.
        # `or {}` throughout: upstream may send these keys as explicit nulls,
        # and .get(key, {}) would then return None and crash on the next .get.
        session_info = sar.get("sessionInfo") or {}
        if session_info.get("session"):
            session_name = session_info["session"]
        answer = sar.get("answer") or {}
        replies = answer.get("replies") or []
        for reply in replies:
            gc = reply.get("groundedContent") or {}
            content = gc.get("content") or {}
            # A "file" entry is what marks a generated image.
            file_info = content.get("file")
            if file_info and file_info.get("fileId"):
                file_ids.append({
                    "fileId": file_info["fileId"],
                    "mimeType": file_info.get("mimeType", "image/png")
                })
    return file_ids, session_name
async def stream_chat_generator(session: str, text_content: str, file_ids: List[str], model_name: str, chat_id: str, created_time: int, account_manager: AccountManager, is_stream: bool = True, request_id: str = "", request: Request = None):
    """Stream an upstream widgetStreamAssist call as OpenAI-style SSE chunks.

    Yields `data: ...` SSE lines: an opening role chunk (stream mode only),
    text / reasoning deltas, generated-image markdown or error notes, then a
    stop chunk and [DONE]. Raises HTTPException on a non-200 upstream status.
    """
    start_time = time.time()
    # Log what is being sent upstream (capped at 500 characters).
    text_preview = text_content[:500] + "...(已截断)" if len(text_content) > 500 else text_content
    logger.info(f"[API] [{account_manager.config.account_id}] [req_{request_id}] 发送内容: {text_preview}")
    if file_ids:
        logger.info(f"[API] [{account_manager.config.account_id}] [req_{request_id}] 附带文件: {len(file_ids)}个")
    jwt = await account_manager.get_jwt(request_id)
    headers = get_common_headers(jwt, USER_AGENT)
    # toolsSpec: image generation is attached only when enabled for this model.
    tools_spec = {
        "webGroundingSpec": {},
        "toolRegistry": "default_tool_registry",
    }
    if IMAGE_GENERATION_ENABLED and model_name in IMAGE_GENERATION_MODELS:
        tools_spec["imageGenerationSpec"] = {}
        tools_spec["videoGenerationSpec"] = {}
    body = {
        "configId": account_manager.config.config_id,
        "additionalParams": {"token": "-"},
        "streamAssistRequest": {
            "session": session,
            "query": {"parts": [{"text": text_content}]},
            "filter": "",
            "fileIds": file_ids,  # previously uploaded attachment ids
            "answerGenerationMode": "NORMAL",
            "toolsSpec": tools_spec,
            "languageCode": "zh-CN",
            "userMetadata": {"timeZone": "Asia/Shanghai"},
            "assistSkippingMode": "REQUEST_ASSIST"
        }
    }
    target_model_id = MODEL_MAPPING.get(model_name)
    if target_model_id:
        body["streamAssistRequest"]["assistGenerationConfig"] = {
            "modelId": target_model_id
        }
    if is_stream:
        # Opening chunk that carries the assistant role.
        chunk = create_chunk(chat_id, created_time, model_name, {"role": "assistant"}, None)
        yield f"data: {chunk}\n\n"
    # Streamed upstream call.
    json_objects = []  # all response objects, kept for image parsing below
    file_ids_info = None  # (file_ids, session_name) when images were generated
    async with http_client.stream(
        "POST",
        "https://biz-discoveryengine.googleapis.com/v1alpha/locations/global/widgetStreamAssist",
        headers=headers,
        json=body,
    ) as r:
        if r.status_code != 200:
            error_text = await r.aread()
            raise HTTPException(status_code=r.status_code, detail=f"Upstream Error {error_text.decode()}")
        # The upstream emits a JSON array as a stream; parse it incrementally.
        try:
            async for json_obj in parse_json_array_stream_async(r.aiter_lines()):
                json_objects.append(json_obj)  # keep for image detection
                # Pull out any text replies.
                for reply in json_obj.get("streamAssistResponse", {}).get("answer", {}).get("replies", []):
                    content_obj = reply.get("groundedContent", {}).get("content", {})
                    text = content_obj.get("text", "")
                    if not text:
                        continue
                    # Split thinking from normal content.
                    if content_obj.get("thought"):
                        # Thoughts go to reasoning_content (OpenAI o1 style).
                        chunk = create_chunk(chat_id, created_time, model_name, {"reasoning_content": text}, None)
                        yield f"data: {chunk}\n\n"
                    else:
                        # Regular answer text goes to content.
                        chunk = create_chunk(chat_id, created_time, model_name, {"content": text}, None)
                        yield f"data: {chunk}\n\n"
            # Detect generated images (still inside the async with block).
            if json_objects:
                file_ids, session_name = parse_images_from_response(json_objects)
                if file_ids and session_name:
                    file_ids_info = (file_ids, session_name)
                    logger.info(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 检测到{len(file_ids)}张生成图片")
        except ValueError as e:
            logger.error(f"[API] [{account_manager.config.account_id}] [req_{request_id}] JSON解析失败: {str(e)}")
        except Exception as e:
            error_type = type(e).__name__
            logger.error(f"[API] [{account_manager.config.account_id}] [req_{request_id}] 流处理错误 ({error_type}): {str(e)}")
            raise
    # Download images outside the async with block so the upstream connection
    # is released first.
    if file_ids_info:
        file_ids, session_name = file_ids_info
        try:
            base_url = get_base_url(request) if request else ""
            file_metadata = await get_session_file_metadata(account_manager, session_name, http_client, USER_AGENT, request_id)
            # Download all images in parallel.
            download_tasks = []
            for file_info in file_ids:
                fid = file_info["fileId"]
                mime = file_info["mimeType"]
                meta = file_metadata.get(fid, {})
                correct_session = meta.get("session") or session_name
                task = download_image_with_jwt(account_manager, correct_session, fid, http_client, USER_AGENT, request_id)
                download_tasks.append((fid, mime, task))
            results = await asyncio.gather(*[task for _, _, task in download_tasks], return_exceptions=True)
            # Emit one markdown image (or an error note) per download result.
            success_count = 0
            for idx, ((fid, mime, _), result) in enumerate(zip(download_tasks, results), 1):
                if isinstance(result, Exception):
                    logger.error(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 图片{idx}下载失败: {type(result).__name__}: {str(result)[:100]}")
                    # Degrade gracefully: tell the user instead of failing silently.
                    error_msg = f"\n\n⚠️ 图片 {idx} 下载失败\n\n"
                    chunk = create_chunk(chat_id, created_time, model_name, {"content": error_msg}, None)
                    yield f"data: {chunk}\n\n"
                    continue
                try:
                    image_url = save_image_to_hf(result, chat_id, fid, mime, base_url, IMAGE_DIR)
                    logger.info(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 图片{idx}已保存: {image_url}")
                    success_count += 1
                    # NOTE(review): this f-string contains no markdown or
                    # {image_url} placeholder, so image_url is never emitted —
                    # looks like content lost in transit (expected something
                    # like f"\n\n![image]({image_url})\n\n"); confirm against VCS.
                    markdown = f"\n\n\n\n"
                    chunk = create_chunk(chat_id, created_time, model_name, {"content": markdown}, None)
                    yield f"data: {chunk}\n\n"
                except Exception as save_error:
                    logger.error(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 图片{idx}保存失败: {str(save_error)[:100]}")
                    error_msg = f"\n\n⚠️ 图片 {idx} 保存失败\n\n"
                    chunk = create_chunk(chat_id, created_time, model_name, {"content": error_msg}, None)
                    yield f"data: {chunk}\n\n"
            logger.info(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 图片处理完成: {success_count}/{len(file_ids)} 成功")
        except Exception as e:
            logger.error(f"[IMAGE] [{account_manager.config.account_id}] [req_{request_id}] 图片处理失败: {type(e).__name__}: {str(e)[:100]}")
            # Degrade gracefully: surface the failure to the user.
            error_msg = f"\n\n⚠️ 图片处理失败: {type(e).__name__}\n\n"
            chunk = create_chunk(chat_id, created_time, model_name, {"content": error_msg}, None)
            yield f"data: {chunk}\n\n"
    total_time = time.time() - start_time
    logger.info(f"[API] [{account_manager.config.account_id}] [req_{request_id}] 响应完成: {total_time:.2f}秒")
    if is_stream:
        # Closing stop chunk + SSE terminator.
        final_chunk = create_chunk(chat_id, created_time, model_name, {}, "stop")
        yield f"data: {final_chunk}\n\n"
        yield "data: [DONE]\n\n"
# ---------- Public endpoints (no auth required) ----------
@app.get("/public/uptime")
async def get_public_uptime(days: int = 90):
    """Uptime monitoring data as JSON; out-of-range `days` falls back to 90."""
    if not 1 <= days <= 90:
        days = 90
    return await uptime_tracker.get_uptime_summary(days)
@app.get("/public/uptime/html")
async def get_public_uptime_html(request: Request):
    """Render the public uptime status page (status.openai.com style)."""
    context = {"request": request}
    return templates.TemplateResponse("public/uptime.html", context)
@app.get("/public/stats")
async def get_public_stats():
    """Public traffic statistics: visitor/request totals plus a coarse load level."""
    async with stats_lock:
        now = time.time()
        # Keep only request timestamps from the last hour.
        recent_hour = [ts for ts in global_stats["request_timestamps"] if now - ts < 3600]
        global_stats["request_timestamps"] = recent_hour
        # Requests within the last 60 seconds drive the load indicator.
        requests_per_minute = sum(1 for ts in recent_hour if now - ts < 60)
        # Coarse load bucket with a matching display color.
        if requests_per_minute < 10:
            load_status, load_color = "low", "#10b981"  # green
        elif requests_per_minute < 30:
            load_status, load_color = "medium", "#f59e0b"  # yellow
        else:
            load_status, load_color = "high", "#ef4444"  # red
        return {
            "total_visitors": global_stats["total_visitors"],
            "total_requests": global_stats["total_requests"],
            "requests_per_minute": requests_per_minute,
            "load_status": load_status,
            "load_color": load_color
        }
@app.get("/public/log")
async def get_public_logs(request: Request, limit: int = 100):
    """Sanitized public log feed; also tracks unique visitors per 24h by IP."""
    try:
        # Resolve the real client IP, preferring the first X-Forwarded-For
        # entry when behind a proxy, falling back to the direct peer address.
        forwarded = request.headers.get("x-forwarded-for")
        if forwarded:
            client_ip = forwarded.split(",")[0].strip()
        else:
            client_ip = request.client.host if request.client else "unknown"
        now = time.time()
        async with stats_lock:
            seen_ips = global_stats.setdefault("visitor_ips", {})
            # Keep only IPs seen within the last 24 hours.
            seen_ips = {
                ip: stamp for ip, stamp in seen_ips.items()
                if now - stamp <= 86400
            }
            # Each IP counts at most once per 24-hour window.
            seen_ips.setdefault(client_ip, now)
            global_stats["visitor_ips"] = seen_ips
            # Visitor total == number of live IP entries after cleanup.
            global_stats["total_visitors"] = len(seen_ips)
            await save_stats(global_stats)
        sanitized_logs = get_sanitized_logs(limit=min(limit, 1000))
        return {
            "total": len(sanitized_logs),
            "logs": sanitized_logs
        }
    except Exception as e:
        logger.error(f"[LOG] 获取公开日志失败: {e}")
        return {"total": 0, "logs": [], "error": str(e)}
@app.get("/public/log/html")
async def get_public_logs_html(request: Request):
    """Render the public sanitized-log viewer page."""
    context = {
        "request": request,
        "logo_url": LOGO_URL,
        "chat_url": CHAT_URL,
    }
    return templates.TemplateResponse("public/logs.html", context)
# ---------- Global 404 handler (must be registered last) ----------
@app.exception_handler(404)
async def not_found_handler(request: Request, exc: HTTPException):
    """Return a uniform JSON body for every unmatched route."""
    payload = {"detail": "Not Found"}
    return JSONResponse(status_code=404, content=payload)
if __name__ == "__main__":
    # Direct-run entry point: serve the ASGI app with uvicorn on all
    # interfaces, port 7860.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)