Spaces:
Running
Running
Upload app.py
Browse files
app.py
CHANGED
|
@@ -8,11 +8,67 @@ import inspect
|
|
| 8 |
import secrets
|
| 9 |
from loguru import logger
|
| 10 |
from pathlib import Path
|
| 11 |
-
|
| 12 |
import requests
|
| 13 |
from flask import Flask, request, Response, jsonify, stream_with_context, render_template, redirect, session
|
| 14 |
from curl_cffi import requests as curl_requests
|
| 15 |
from werkzeug.middleware.proxy_fix import ProxyFix
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
class Logger:
|
| 18 |
def __init__(self, level="INFO", colorize=True, format=None):
|
|
@@ -86,16 +142,16 @@ if not DATA_DIR.exists():
|
|
| 86 |
DATA_DIR.mkdir(parents=True, exist_ok=True)
|
| 87 |
CONFIG = {
|
| 88 |
"MODELS": {
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
"grok-3": "grok-3",
|
| 90 |
"grok-3-search": "grok-3",
|
| 91 |
"grok-3-imageGen": "grok-3",
|
| 92 |
"grok-3-deepsearch": "grok-3",
|
| 93 |
"grok-3-deepersearch": "grok-3",
|
| 94 |
-
"grok-3-reasoning": "grok-3"
|
| 95 |
-
'grok-4': 'grok-4',
|
| 96 |
-
'grok-4-reasoning': 'grok-4',
|
| 97 |
-
'grok-4-imageGen': 'grok-4',
|
| 98 |
-
'grok-4-deepsearch': 'grok-4'
|
| 99 |
},
|
| 100 |
"API": {
|
| 101 |
"IS_TEMP_CONVERSATION": os.environ.get("IS_TEMP_CONVERSATION", "true").lower() == "true",
|
|
@@ -122,82 +178,204 @@ CONFIG = {
|
|
| 122 |
"MAX_ATTEMPTS": 2
|
| 123 |
},
|
| 124 |
"TOKEN_STATUS_FILE": str(DATA_DIR / "token_status.json"),
|
| 125 |
-
"SHOW_THINKING": os.environ.get("SHOW_THINKING")
|
| 126 |
"IS_THINKING": False,
|
| 127 |
"IS_IMG_GEN": False,
|
| 128 |
"IS_IMG_GEN2": False,
|
| 129 |
-
"ISSHOW_SEARCH_RESULTS": os.environ.get("ISSHOW_SEARCH_RESULTS", "true").lower() == "true"
|
| 130 |
-
"IS_SUPER_GROK": os.environ.get("IS_SUPER_GROK", "false").lower() == "true"
|
| 131 |
}
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
|
| 153 |
class AuthTokenManager:
|
| 154 |
def __init__(self):
|
| 155 |
self.token_model_map = {}
|
| 156 |
self.expired_tokens = set()
|
| 157 |
self.token_status_map = {}
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
}
|
| 180 |
-
|
| 181 |
-
"
|
| 182 |
-
|
| 183 |
-
"ExpirationTime": 3 * 60 * 60 * 1000 # 3小时
|
| 184 |
-
},
|
| 185 |
-
"grok-3-deepsearch": {
|
| 186 |
-
"RequestFrequency": 10,
|
| 187 |
-
"ExpirationTime": 24 * 60 * 60 * 1000 # 24小时
|
| 188 |
-
},
|
| 189 |
-
"grok-3-deepersearch": {
|
| 190 |
-
"RequestFrequency": 3,
|
| 191 |
-
"ExpirationTime": 24 * 60 * 60 * 1000 # 24小时
|
| 192 |
-
},
|
| 193 |
-
"grok-3-reasoning": {
|
| 194 |
-
"RequestFrequency": 8,
|
| 195 |
-
"ExpirationTime": 24 * 60 * 60 * 1000 # 24小时
|
| 196 |
-
}
|
| 197 |
}
|
| 198 |
-
|
| 199 |
self.token_reset_switch = False
|
| 200 |
self.token_reset_timer = None
|
|
|
|
| 201 |
def save_token_status(self):
|
| 202 |
try:
|
| 203 |
with open(CONFIG["TOKEN_STATUS_FILE"], 'w', encoding='utf-8') as f:
|
|
@@ -215,67 +393,47 @@ class AuthTokenManager:
|
|
| 215 |
logger.info("已从配置文件加载令牌状态", "TokenManager")
|
| 216 |
except Exception as error:
|
| 217 |
logger.error(f"加载令牌状态失败: {str(error)}", "TokenManager")
|
| 218 |
-
def add_token(self,
|
| 219 |
-
|
| 220 |
-
tokenSso = tokens.get("token")
|
| 221 |
-
if tokenType == "normal":
|
| 222 |
-
self.model_config = self.model_normal_config
|
| 223 |
-
else:
|
| 224 |
-
self.model_config = self.model_super_config
|
| 225 |
-
sso = tokenSso.split("sso=")[1].split(";")[0]
|
| 226 |
-
|
| 227 |
for model in self.model_config.keys():
|
| 228 |
if model not in self.token_model_map:
|
| 229 |
self.token_model_map[model] = []
|
| 230 |
if sso not in self.token_status_map:
|
| 231 |
self.token_status_map[sso] = {}
|
| 232 |
|
| 233 |
-
existing_token_entry = next((entry for entry in self.token_model_map[model] if entry["token"] ==
|
| 234 |
|
| 235 |
if not existing_token_entry:
|
| 236 |
self.token_model_map[model].append({
|
| 237 |
-
"token":
|
| 238 |
-
"MaxRequestCount": self.model_config[model]["RequestFrequency"],
|
| 239 |
"RequestCount": 0,
|
| 240 |
"AddedTime": int(time.time() * 1000),
|
| 241 |
-
"StartCallTime": None
|
| 242 |
-
"type": tokenType
|
| 243 |
})
|
| 244 |
|
| 245 |
if model not in self.token_status_map[sso]:
|
| 246 |
self.token_status_map[sso][model] = {
|
| 247 |
"isValid": True,
|
| 248 |
"invalidatedTime": None,
|
| 249 |
-
"totalRequestCount": 0
|
| 250 |
-
"isSuper":tokenType == "super"
|
| 251 |
}
|
| 252 |
if not isinitialization:
|
| 253 |
self.save_token_status()
|
| 254 |
|
| 255 |
-
def set_token(self,
|
| 256 |
-
tokenType = tokens.get("type")
|
| 257 |
-
tokenSso = tokens.get("token")
|
| 258 |
-
if tokenType == "normal":
|
| 259 |
-
self.model_config = self.model_normal_config
|
| 260 |
-
else:
|
| 261 |
-
self.model_config = self.model_super_config
|
| 262 |
-
|
| 263 |
models = list(self.model_config.keys())
|
| 264 |
self.token_model_map = {model: [{
|
| 265 |
-
"token":
|
| 266 |
-
"MaxRequestCount": self.model_config[model]["RequestFrequency"],
|
| 267 |
"RequestCount": 0,
|
| 268 |
"AddedTime": int(time.time() * 1000),
|
| 269 |
-
"StartCallTime": None
|
| 270 |
-
"type": tokenType
|
| 271 |
}] for model in models}
|
| 272 |
|
| 273 |
-
sso =
|
| 274 |
self.token_status_map[sso] = {model: {
|
| 275 |
"isValid": True,
|
| 276 |
"invalidatedTime": None,
|
| 277 |
-
"totalRequestCount": 0
|
| 278 |
-
"isSuper":tokenType == "super"
|
| 279 |
} for model in models}
|
| 280 |
|
| 281 |
def delete_token(self, token):
|
|
@@ -334,15 +492,10 @@ class AuthTokenManager:
|
|
| 334 |
return None
|
| 335 |
|
| 336 |
token_entry = self.token_model_map[normalized_model][0]
|
| 337 |
-
logger.info(f"token_entry: {token_entry}", "TokenManager")
|
| 338 |
if is_return:
|
| 339 |
return token_entry["token"]
|
| 340 |
|
| 341 |
if token_entry:
|
| 342 |
-
if token_entry["type"] == "super":
|
| 343 |
-
self.model_config = self.model_super_config
|
| 344 |
-
else:
|
| 345 |
-
self.model_config = self.model_normal_config
|
| 346 |
if token_entry["StartCallTime"] is None:
|
| 347 |
token_entry["StartCallTime"] = int(time.time() * 1000)
|
| 348 |
|
|
@@ -352,21 +505,18 @@ class AuthTokenManager:
|
|
| 352 |
|
| 353 |
token_entry["RequestCount"] += 1
|
| 354 |
|
| 355 |
-
if token_entry["RequestCount"] >
|
| 356 |
self.remove_token_from_model(normalized_model, token_entry["token"])
|
| 357 |
next_token_entry = self.token_model_map[normalized_model][0] if self.token_model_map[normalized_model] else None
|
| 358 |
return next_token_entry["token"] if next_token_entry else None
|
| 359 |
|
| 360 |
sso = token_entry["token"].split("sso=")[1].split(";")[0]
|
| 361 |
-
|
| 362 |
if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
|
| 363 |
if token_entry["RequestCount"] == self.model_config[normalized_model]["RequestFrequency"]:
|
| 364 |
self.token_status_map[sso][normalized_model]["isValid"] = False
|
| 365 |
self.token_status_map[sso][normalized_model]["invalidatedTime"] = int(time.time() * 1000)
|
| 366 |
self.token_status_map[sso][normalized_model]["totalRequestCount"] += 1
|
| 367 |
|
| 368 |
-
|
| 369 |
-
|
| 370 |
self.save_token_status()
|
| 371 |
|
| 372 |
return token_entry["token"]
|
|
@@ -388,8 +538,7 @@ class AuthTokenManager:
|
|
| 388 |
self.expired_tokens.add((
|
| 389 |
removed_token_entry["token"],
|
| 390 |
normalized_model,
|
| 391 |
-
int(time.time() * 1000)
|
| 392 |
-
removed_token_entry["type"]
|
| 393 |
))
|
| 394 |
|
| 395 |
if not self.token_reset_switch:
|
|
@@ -406,7 +555,7 @@ class AuthTokenManager:
|
|
| 406 |
return list(self.expired_tokens)
|
| 407 |
|
| 408 |
def normalize_model_name(self, model):
|
| 409 |
-
if model.startswith('grok-') and not
|
| 410 |
return '-'.join(model.split('-')[:2])
|
| 411 |
return model
|
| 412 |
|
|
@@ -419,8 +568,8 @@ class AuthTokenManager:
|
|
| 419 |
|
| 420 |
for model in self.model_config.keys():
|
| 421 |
model_tokens = self.token_model_map.get(model, [])
|
| 422 |
-
|
| 423 |
-
|
| 424 |
total_used_requests = sum(token_entry.get("RequestCount", 0) for token_entry in model_tokens)
|
| 425 |
|
| 426 |
remaining_capacity = (len(model_tokens) * model_request_frequency) - total_used_requests
|
|
@@ -436,13 +585,10 @@ class AuthTokenManager:
|
|
| 436 |
def reset_expired_tokens():
|
| 437 |
now = int(time.time() * 1000)
|
| 438 |
|
| 439 |
-
model_config = self.model_normal_config
|
| 440 |
tokens_to_remove = set()
|
| 441 |
for token_info in self.expired_tokens:
|
| 442 |
-
token, model, expired_time
|
| 443 |
-
|
| 444 |
-
model_config = self.model_super_config
|
| 445 |
-
expiration_time = model_config[model]["ExpirationTime"]
|
| 446 |
|
| 447 |
if now - expired_time >= expiration_time:
|
| 448 |
if not any(entry["token"] == token for entry in self.token_model_map.get(model, [])):
|
|
@@ -451,11 +597,9 @@ class AuthTokenManager:
|
|
| 451 |
|
| 452 |
self.token_model_map[model].append({
|
| 453 |
"token": token,
|
| 454 |
-
"MaxRequestCount": model_config[model]["RequestFrequency"],
|
| 455 |
"RequestCount": 0,
|
| 456 |
"AddedTime": now,
|
| 457 |
-
"StartCallTime": None
|
| 458 |
-
"type": type
|
| 459 |
})
|
| 460 |
|
| 461 |
sso = token.split("sso=")[1].split(";")[0]
|
|
@@ -463,13 +607,12 @@ class AuthTokenManager:
|
|
| 463 |
self.token_status_map[sso][model]["isValid"] = True
|
| 464 |
self.token_status_map[sso][model]["invalidatedTime"] = None
|
| 465 |
self.token_status_map[sso][model]["totalRequestCount"] = 0
|
| 466 |
-
self.token_status_map[sso][model]["isSuper"] = type == "super"
|
| 467 |
|
| 468 |
tokens_to_remove.add(token_info)
|
| 469 |
|
| 470 |
self.expired_tokens -= tokens_to_remove
|
| 471 |
|
| 472 |
-
for model in model_config.keys():
|
| 473 |
if model not in self.token_model_map:
|
| 474 |
continue
|
| 475 |
|
|
@@ -477,14 +620,13 @@ class AuthTokenManager:
|
|
| 477 |
if not token_entry.get("StartCallTime"):
|
| 478 |
continue
|
| 479 |
|
| 480 |
-
expiration_time = model_config[model]["ExpirationTime"]
|
| 481 |
if now - token_entry["StartCallTime"] >= expiration_time:
|
| 482 |
sso = token_entry["token"].split("sso=")[1].split(";")[0]
|
| 483 |
if sso in self.token_status_map and model in self.token_status_map[sso]:
|
| 484 |
self.token_status_map[sso][model]["isValid"] = True
|
| 485 |
self.token_status_map[sso][model]["invalidatedTime"] = None
|
| 486 |
self.token_status_map[sso][model]["totalRequestCount"] = 0
|
| 487 |
-
self.token_status_map[sso][model]["isSuper"] = token_entry["type"] == "super"
|
| 488 |
|
| 489 |
token_entry["RequestCount"] = 0
|
| 490 |
token_entry["StartCallTime"] = None
|
|
@@ -518,6 +660,73 @@ class AuthTokenManager:
|
|
| 518 |
def get_token_status_map(self):
|
| 519 |
return self.token_status_map
|
| 520 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 521 |
class Utils:
|
| 522 |
@staticmethod
|
| 523 |
def organize_search_results(search_results):
|
|
@@ -532,11 +741,43 @@ class Utils:
|
|
| 532 |
url = result.get('url', '#')
|
| 533 |
preview = result.get('preview', '无预览内容')
|
| 534 |
|
| 535 |
-
formatted_result = f"\r\n<details><summary>资料[{index}]: {title}</summary>\r\n{preview}\r\n\n[
|
| 536 |
formatted_results.append(formatted_result)
|
| 537 |
|
| 538 |
return '\n\n'.join(formatted_results)
|
| 539 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 540 |
@staticmethod
|
| 541 |
def create_auth_headers(model, is_return=False):
|
| 542 |
return token_manager.get_next_token_for_model(model, is_return)
|
|
@@ -597,16 +838,24 @@ class GrokApiClient:
|
|
| 597 |
}
|
| 598 |
|
| 599 |
logger.info("发送文字文件请求", "Server")
|
| 600 |
-
cookie = f"{Utils.create_auth_headers(model, True)};{CONFIG['SERVER']['CF_CLEARANCE']}"
|
| 601 |
proxy_options = Utils.get_proxy_options()
|
| 602 |
-
|
| 603 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 604 |
headers={
|
| 605 |
-
**
|
| 606 |
-
"Cookie":cookie
|
| 607 |
},
|
| 608 |
-
json=upload_data,
|
| 609 |
-
impersonate="chrome133a",
|
| 610 |
**proxy_options
|
| 611 |
)
|
| 612 |
|
|
@@ -644,14 +893,22 @@ class GrokApiClient:
|
|
| 644 |
logger.info("发送图片请求", "Server")
|
| 645 |
|
| 646 |
proxy_options = Utils.get_proxy_options()
|
| 647 |
-
|
| 648 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 649 |
headers={
|
| 650 |
-
**
|
| 651 |
-
"Cookie":CONFIG["SERVER"]['COOKIE']
|
| 652 |
},
|
| 653 |
-
json=upload_data,
|
| 654 |
-
impersonate="chrome133a",
|
| 655 |
**proxy_options
|
| 656 |
)
|
| 657 |
|
|
@@ -706,14 +963,13 @@ class GrokApiClient:
|
|
| 706 |
message_length = 0
|
| 707 |
convert_to_file = False
|
| 708 |
last_message_content = ''
|
| 709 |
-
search = request["model"] in ['grok-4-
|
| 710 |
deepsearchPreset = ''
|
| 711 |
if request["model"] == 'grok-3-deepsearch':
|
| 712 |
deepsearchPreset = 'default'
|
| 713 |
elif request["model"] == 'grok-3-deepersearch':
|
| 714 |
deepsearchPreset = 'deeper'
|
| 715 |
|
| 716 |
-
# 移除<think>标签及其内容和base64图片
|
| 717 |
def remove_think_tags(text):
|
| 718 |
import re
|
| 719 |
text = re.sub(r'<think>[\s\S]*?<\/think>', '', text).strip()
|
|
@@ -849,80 +1105,72 @@ class MessageProcessor:
|
|
| 849 |
}
|
| 850 |
|
| 851 |
def process_model_response(response, model):
|
| 852 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 853 |
|
| 854 |
if CONFIG["IS_IMG_GEN"]:
|
| 855 |
if response.get("cachedImageGenerationResponse") and not CONFIG["IS_IMG_GEN2"]:
|
| 856 |
result["imageUrl"] = response["cachedImageGenerationResponse"]["imageUrl"]
|
|
|
|
| 857 |
return result
|
| 858 |
-
if model == 'grok-3':
|
| 859 |
-
result["token"] = response.get("token")
|
| 860 |
-
elif model in ['grok-3-search']:
|
| 861 |
-
if response.get("webSearchResults") and CONFIG["ISSHOW_SEARCH_RESULTS"]:
|
| 862 |
-
result["token"] = f"\r\n<think>{Utils.organize_search_results(response['webSearchResults'])}</think>\r\n"
|
| 863 |
-
else:
|
| 864 |
-
result["token"] = response.get("token")
|
| 865 |
-
elif model in ['grok-3-deepsearch', 'grok-3-deepersearch','grok-4-deepsearch']:
|
| 866 |
-
if response.get("messageStepId") and not CONFIG["SHOW_THINKING"]:
|
| 867 |
-
return result
|
| 868 |
-
if response.get("messageStepId") and not CONFIG["IS_THINKING"]:
|
| 869 |
-
result["token"] = "<think>" + response.get("token", "")
|
| 870 |
-
CONFIG["IS_THINKING"] = True
|
| 871 |
-
elif not response.get("messageStepId") and CONFIG["IS_THINKING"] and response.get("messageTag") == "final":
|
| 872 |
-
result["token"] = "</think>" + response.get("token", "")
|
| 873 |
-
CONFIG["IS_THINKING"] = False
|
| 874 |
-
elif (response.get("messageStepId") and CONFIG["IS_THINKING"] and response.get("messageTag") == "assistant") or response.get("messageTag") == "final":
|
| 875 |
-
result["token"] = response.get("token","")
|
| 876 |
-
elif (CONFIG["IS_THINKING"] and response.get("token","").get("action","") == "webSearch"):
|
| 877 |
-
result["token"] = response.get("token","").get("action_input","").get("query","")
|
| 878 |
-
elif (CONFIG["IS_THINKING"] and response.get("webSearchResults")):
|
| 879 |
-
result["token"] = Utils.organize_search_results(response['webSearchResults'])
|
| 880 |
-
elif model == 'grok-3-reasoning':
|
| 881 |
-
if response.get("isThinking") and not CONFIG["SHOW_THINKING"]:
|
| 882 |
-
return result
|
| 883 |
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
|
| 889 |
-
|
| 890 |
-
else:
|
| 891 |
-
result["token"] = response.get("token")
|
| 892 |
|
| 893 |
-
|
| 894 |
-
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
if
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 899 |
return result
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
|
| 903 |
-
|
| 904 |
-
|
| 905 |
-
|
| 906 |
-
|
| 907 |
-
|
| 908 |
-
|
| 909 |
-
|
|
|
|
|
|
|
| 910 |
return result
|
| 911 |
-
|
| 912 |
-
|
| 913 |
-
|
| 914 |
-
|
| 915 |
-
|
| 916 |
-
CONFIG["IS_THINKING"] = False
|
| 917 |
-
elif (response.get("messageStepId") and CONFIG["IS_THINKING"] and response.get("messageTag") == "assistant") or response.get("messageTag") == "final":
|
| 918 |
-
result["token"] = response.get("token","")
|
| 919 |
-
elif (CONFIG["IS_THINKING"] and response.get("token","").get("action","") == "webSearch"):
|
| 920 |
-
result["token"] = response.get("token","").get("action_input","").get("query","")
|
| 921 |
-
elif (CONFIG["IS_THINKING"] and response.get("webSearchResults")):
|
| 922 |
-
result["token"] = Utils.organize_search_results(response['webSearchResults'])
|
| 923 |
|
| 924 |
return result
|
| 925 |
-
|
| 926 |
def handle_image_response(image_url):
|
| 927 |
max_retries = 2
|
| 928 |
retry_count = 0
|
|
@@ -931,13 +1179,21 @@ def handle_image_response(image_url):
|
|
| 931 |
while retry_count < max_retries:
|
| 932 |
try:
|
| 933 |
proxy_options = Utils.get_proxy_options()
|
| 934 |
-
|
| 935 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 936 |
headers={
|
| 937 |
-
**
|
| 938 |
-
"Cookie":CONFIG["SERVER"]['COOKIE']
|
| 939 |
},
|
| 940 |
-
impersonate="chrome133a",
|
| 941 |
**proxy_options
|
| 942 |
)
|
| 943 |
|
|
@@ -1057,84 +1313,137 @@ def handle_non_stream_response(response, model):
|
|
| 1057 |
except Exception as error:
|
| 1058 |
logger.error(str(error), "Server")
|
| 1059 |
raise
|
| 1060 |
-
def handle_stream_response(response, model):
|
| 1061 |
-
def generate():
|
| 1062 |
-
logger.info("开始处理流式响应", "Server")
|
| 1063 |
-
|
| 1064 |
-
stream = response.iter_lines()
|
| 1065 |
-
CONFIG["IS_THINKING"] = False
|
| 1066 |
-
CONFIG["IS_IMG_GEN"] = False
|
| 1067 |
-
CONFIG["IS_IMG_GEN2"] = False
|
| 1068 |
|
| 1069 |
-
|
| 1070 |
-
|
| 1071 |
-
|
| 1072 |
-
try:
|
| 1073 |
-
line_json = json.loads(chunk.decode("utf-8").strip())
|
| 1074 |
-
print(line_json)
|
| 1075 |
-
if line_json.get("error"):
|
| 1076 |
-
logger.error(json.dumps(line_json, indent=2), "Server")
|
| 1077 |
-
yield json.dumps({"error": "RateLimitError"}) + "\n\n"
|
| 1078 |
-
return
|
| 1079 |
|
| 1080 |
-
|
| 1081 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1082 |
continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1083 |
|
| 1084 |
-
|
| 1085 |
-
|
| 1086 |
-
|
| 1087 |
-
result = process_model_response(response_data, model)
|
| 1088 |
-
|
| 1089 |
-
if result["token"]:
|
| 1090 |
-
yield f"data: {json.dumps(MessageProcessor.create_chat_response(result['token'], model, True))}\n\n"
|
| 1091 |
-
|
| 1092 |
-
if result["imageUrl"]:
|
| 1093 |
-
CONFIG["IS_IMG_GEN2"] = True
|
| 1094 |
-
image_data = handle_image_response(result["imageUrl"])
|
| 1095 |
-
yield f"data: {json.dumps(MessageProcessor.create_chat_response(image_data, model, True))}\n\n"
|
| 1096 |
|
| 1097 |
-
except json.JSONDecodeError:
|
| 1098 |
-
continue
|
| 1099 |
-
except Exception as e:
|
| 1100 |
-
logger.error(f"处理流式响应行时出错: {str(e)}", "Server")
|
| 1101 |
-
continue
|
| 1102 |
|
| 1103 |
-
|
| 1104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1105 |
|
|
|
|
|
|
|
|
|
|
| 1106 |
def initialization():
|
| 1107 |
sso_array = os.environ.get("SSO", "").split(',')
|
| 1108 |
-
sso_array_super = os.environ.get("SSO_SUPER", "").split(',')
|
| 1109 |
-
|
| 1110 |
-
combined_dict = []
|
| 1111 |
-
for value in sso_array_super:
|
| 1112 |
-
combined_dict.append({
|
| 1113 |
-
"token": f"sso-rw={value};sso={value}",
|
| 1114 |
-
"type": "super"
|
| 1115 |
-
})
|
| 1116 |
-
for value in sso_array:
|
| 1117 |
-
combined_dict.append({
|
| 1118 |
-
"token": f"sso-rw={value};sso={value}",
|
| 1119 |
-
"type": "normal"
|
| 1120 |
-
})
|
| 1121 |
-
|
| 1122 |
-
|
| 1123 |
logger.info("开始加载令牌", "Server")
|
| 1124 |
token_manager.load_token_status()
|
| 1125 |
-
for
|
| 1126 |
-
if
|
| 1127 |
-
token_manager.add_token(
|
| 1128 |
token_manager.save_token_status()
|
| 1129 |
|
| 1130 |
logger.info(f"成功加载令牌: {json.dumps(token_manager.get_all_tokens(), indent=2)}", "Server")
|
| 1131 |
-
logger.info(f"令牌加载完成,共加载: {len(
|
| 1132 |
-
logger.info(f"其中共加载: {len(sso_array_super)}个super会员令牌", "Server")
|
| 1133 |
|
| 1134 |
if CONFIG["API"]["PROXY"]:
|
| 1135 |
logger.info(f"代理已设置: {CONFIG['API']['PROXY']}", "Server")
|
| 1136 |
|
| 1137 |
-
|
| 1138 |
|
| 1139 |
|
| 1140 |
app = Flask(__name__)
|
|
@@ -1178,7 +1487,7 @@ def add_manager_token():
|
|
| 1178 |
sso = request.json.get('sso')
|
| 1179 |
if not sso:
|
| 1180 |
return jsonify({"error": "SSO token is required"}), 400
|
| 1181 |
-
token_manager.add_token(
|
| 1182 |
return jsonify({"success": True})
|
| 1183 |
except Exception as e:
|
| 1184 |
return jsonify({"error": str(e)}), 500
|
|
@@ -1229,7 +1538,7 @@ def add_token():
|
|
| 1229 |
|
| 1230 |
try:
|
| 1231 |
sso = request.json.get('sso')
|
| 1232 |
-
token_manager.add_token(
|
| 1233 |
return jsonify(token_manager.get_token_status_map().get(sso, {})), 200
|
| 1234 |
except Exception as error:
|
| 1235 |
logger.error(str(error), "Server")
|
|
@@ -1301,7 +1610,6 @@ def chat_completions():
|
|
| 1301 |
retry_count = 0
|
| 1302 |
grok_client = GrokApiClient(model)
|
| 1303 |
request_payload = grok_client.prepare_chat_request(data)
|
| 1304 |
-
|
| 1305 |
logger.info(json.dumps(request_payload,indent=2))
|
| 1306 |
|
| 1307 |
while retry_count < CONFIG["RETRY"]["MAX_ATTEMPTS"]:
|
|
@@ -1323,16 +1631,25 @@ def chat_completions():
|
|
| 1323 |
logger.info(json.dumps(request_payload,indent=2),"Server")
|
| 1324 |
try:
|
| 1325 |
proxy_options = Utils.get_proxy_options()
|
| 1326 |
-
|
| 1327 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1328 |
headers={
|
| 1329 |
-
**
|
| 1330 |
-
"Cookie":CONFIG["SERVER"]['COOKIE']
|
| 1331 |
},
|
| 1332 |
-
|
| 1333 |
-
|
| 1334 |
-
stream=True,
|
| 1335 |
-
**proxy_options)
|
| 1336 |
logger.info(CONFIG["SERVER"]['COOKIE'],"Server")
|
| 1337 |
if response.status_code == 200:
|
| 1338 |
response_status_code = 200
|
|
@@ -1416,4 +1733,4 @@ if __name__ == '__main__':
|
|
| 1416 |
host='0.0.0.0',
|
| 1417 |
port=CONFIG["SERVER"]["PORT"],
|
| 1418 |
debug=False
|
| 1419 |
-
|
|
|
|
| 8 |
import secrets
|
| 9 |
from loguru import logger
|
| 10 |
from pathlib import Path
|
| 11 |
+
import uuid
|
| 12 |
import requests
|
| 13 |
from flask import Flask, request, Response, jsonify, stream_with_context, render_template, redirect, session
|
| 14 |
from curl_cffi import requests as curl_requests
|
| 15 |
from werkzeug.middleware.proxy_fix import ProxyFix
|
| 16 |
+
from xStatsigIDGenerator import XStatsigIDGenerator
|
| 17 |
+
import random
|
| 18 |
+
import string
|
| 19 |
+
|
| 20 |
+
# Vocabulary of realistic-looking JavaScript/DOM property names used to
# fabricate the fake "TypeError" filler for locally generated fallback
# x-statsig-id values (consumed by create_error_filler).
CORE_WORDS = [
    '__value', '_data-enctype', '_data-margin', '_style', '_transform', '_value',
    'className', 'color', 'currentTime', 'dataset', 'disabled', 'enctype',
    'href', 'innerHTML', 'method', 'multiple', 'name', 'naturalHeight',
    'naturalWidth', 'offsetWidth', 'onclick', 'onerror', 'options', 'padding',
    'paused', 'placeholder', 'position', 'scrollLeft', 'title', 'transform',
    'type', 'width', 'zIndex', 'volume'
]
# Suffix words occasionally appended to a core word (the rare branch of
# create_error_filler) to mimic compound DOM property names.
MODIFIERS = [
    'InnerHTML', 'Children', 'Style', 'Options', 'Disabled', 'Onload', 'Volume', 'Alt'
]
|
| 31 |
+
|
| 32 |
+
def generate_random_part(length_min=4, length_max=8):
    """Return a random lowercase-alphanumeric gibberish string.

    The length is drawn uniformly from ``[length_min, length_max]``.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
    size = random.randint(length_min, length_max)
    picked = random.choices(alphabet, k=size)
    return ''.join(picked)
|
| 36 |
+
|
| 37 |
+
def create_error_filler():
    """Randomly assemble an error-message filler from the recipe.

    Distribution (by a single uniform roll):
      40% core word + random 4-char suffix,
      25% bare core word,
      20% pure 8–12-char gibberish,
      10% core word with array/key index access,
       5% lowercased core word + modifier suffix.
    """
    roll = random.random()

    if roll < 0.40:
        # Core word with a short random suffix, e.g. "innerHTML_a1b2".
        base = random.choice(CORE_WORDS)
        return f"{base}_{generate_random_part(4, 4)}"

    if roll < 0.65:
        # Plain core word.
        return random.choice(CORE_WORDS)

    if roll < 0.85:
        # Pure gibberish.
        return generate_random_part(8, 12)

    if roll < 0.95:
        # Core word with an index access: numeric or string key.
        base = random.choice(CORE_WORDS)
        if random.random() < 0.5:
            return f"{base}[{random.randint(0, 2)}]"
        return f"{base}['{generate_random_part(4, 4)}']"

    # Core word + modifier; capitalize().lower() keeps the original's
    # net effect of fully lowercasing the first word.
    first = random.choice(CORE_WORDS).capitalize()
    second = random.choice(MODIFIERS)
    return f"{first.lower()}{second}"
|
| 61 |
+
|
| 62 |
+
def generate_fallback_id():
    """Locally fabricate an acceptable "error fallback" x-statsig-id.

    This is the local re-implementation of the soundai.ee endpoint: it
    base64-encodes a synthetic JavaScript TypeError message built around a
    randomly generated property name from create_error_filler().
    """
    message = (
        "e:TypeError: Cannot read properties of null "
        f"(reading '{create_error_filler()}')"
    )
    return base64.b64encode(message.encode('utf-8')).decode('utf-8')
|
| 72 |
|
| 73 |
class Logger:
|
| 74 |
def __init__(self, level="INFO", colorize=True, format=None):
|
|
|
|
| 142 |
DATA_DIR.mkdir(parents=True, exist_ok=True)
|
| 143 |
CONFIG = {
|
| 144 |
"MODELS": {
|
| 145 |
+
'grok-4-heavy': 'grok-4-heavy',
|
| 146 |
+
'grok-4': 'grok-4',
|
| 147 |
+
'grok-4-imageGen': 'grok-4',
|
| 148 |
+
'grok-4-search': 'grok-4',
|
| 149 |
"grok-3": "grok-3",
|
| 150 |
"grok-3-search": "grok-3",
|
| 151 |
"grok-3-imageGen": "grok-3",
|
| 152 |
"grok-3-deepsearch": "grok-3",
|
| 153 |
"grok-3-deepersearch": "grok-3",
|
| 154 |
+
"grok-3-reasoning": "grok-3"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
},
|
| 156 |
"API": {
|
| 157 |
"IS_TEMP_CONVERSATION": os.environ.get("IS_TEMP_CONVERSATION", "true").lower() == "true",
|
|
|
|
| 178 |
"MAX_ATTEMPTS": 2
|
| 179 |
},
|
| 180 |
"TOKEN_STATUS_FILE": str(DATA_DIR / "token_status.json"),
|
| 181 |
+
"SHOW_THINKING": os.environ.get("SHOW_THINKING") == "true",
|
| 182 |
"IS_THINKING": False,
|
| 183 |
"IS_IMG_GEN": False,
|
| 184 |
"IS_IMG_GEN2": False,
|
| 185 |
+
"ISSHOW_SEARCH_RESULTS": os.environ.get("ISSHOW_SEARCH_RESULTS", "true").lower() == "true"
|
|
|
|
| 186 |
}
|
| 187 |
|
| 188 |
+
def generate_statsig_id_fallback():
    """Generate an x_statsig_id with the self-contained generator.

    Returns a UUID-based placeholder string if the generator raises.
    """
    try:
        statsig_id = XStatsigIDGenerator().generate_x_statsig_id()
        logger.info("使用自主生成方法成功生成 x_statsig_id", "StatsigGenerator")
        return statsig_id
    except Exception as e:
        logger.error(f"自主生成 x_statsig_id 失败: {e}", "StatsigGenerator")
        # Last resort: a recognizable but still-unique default value.
        return "fallback-statsig-id-" + str(uuid.uuid4())
|
| 201 |
+
|
| 202 |
+
def get_x_statsig_id_primary():
    """Primary strategy: self-generate the x_statsig_id.

    Returns a result dict: ``{'success': True, 'x_statsig_id': ...,
    'method': 'self_generated'}`` on success, or ``{'success': False,
    'error': ..., 'method': 'self_generated'}`` on failure.
    """
    try:
        logger.info("使用主要策略:自主生成 x_statsig_id", "StatsigGenerator")
        statsig_id = XStatsigIDGenerator().generate_x_statsig_id()
        logger.info("主要策略成功:自主生成 x_statsig_id 完成", "StatsigGenerator")
        outcome = {
            'success': True,
            'x_statsig_id': statsig_id,
            'method': 'self_generated'
        }
    except Exception as e:
        logger.error(f"主要策略失败:自主生成 x_statsig_id 错误: {e}", "StatsigGenerator")
        outcome = {
            'success': False,
            'error': str(e),
            'method': 'self_generated'
        }
    return outcome
|
| 223 |
|
| 224 |
+
def get_x_statsig_id_fallback():
    """Fallback strategy: locally generated "error fallback" ID.

    Replaces the former network request with generate_fallback_id().
    Returns a result dict with 'success', 'method', and either
    'x_statsig_id' or 'error'.
    """
    try:
        logger.info("使用备用策略:本地生成错误回退 ID", "StatsigLocal")
        fallback_id = generate_fallback_id()

        if not fallback_id:
            # Our own generator should never yield an empty value, but the
            # guard is kept to preserve the original error handling.
            logger.error("备用策略失败:本地生成ID时出错", "StatsigLocal")
            return {
                'success': False,
                'error': '本地生成ID失败',
                'method': 'local_fallback_generator'
            }

        logger.info("备用策略成功:本地生成错误回退 ID 完成", "StatsigLocal")
        return {
            'success': True,
            'x_statsig_id': fallback_id,
            'method': 'local_fallback_generator'
        }

    except Exception as e:
        logger.error(f"备用策略异常:{e}", "StatsigLocal")
        return {
            'success': False,
            'error': str(e),
            'method': 'local_fallback_generator'
        }
|
| 255 |
+
|
| 256 |
+
def get_x_statsig_id():
    """Resolve an x_statsig_id.

    Order: self-generation first, then the local fallback generator,
    finally a UUID-based placeholder if both strategies fail.
    """
    primary = get_x_statsig_id_primary()
    if primary['success']:
        return primary['x_statsig_id']

    # Secondary: local "error fallback" generator (the former remote/PHP
    # fallback was replaced by generate_fallback_id).
    logger.warning("主要策略失败,切换到备用策略", "StatsigStrategy")
    fallback = get_x_statsig_id_fallback()
    if fallback['success']:
        return fallback['x_statsig_id']

    # Every strategy failed: return a recognizable default value.
    logger.error("所有策略都失败,使用默认 x_statsig_id", "StatsigStrategy")
    return "fallback-statsig-id-" + str(uuid.uuid4())
|
| 276 |
+
|
| 277 |
+
# Module-level cache for the x_statsig_id (resolved once at startup / first use).
_cached_x_statsig_id = None
# How the cached value was last obtained ('initial' or 'php_interface').
_cached_x_statsig_id_method = None

def get_cached_x_statsig_id():
    """
    Return the cached x_statsig_id, resolving and caching it on first use.
    """
    global _cached_x_statsig_id, _cached_x_statsig_id_method
    if _cached_x_statsig_id is None:
        _cached_x_statsig_id = get_x_statsig_id()
        _cached_x_statsig_id_method = 'initial'
    return _cached_x_statsig_id
|
| 290 |
+
|
| 291 |
+
def refresh_x_statsig_id_with_fallback():
    """Force-refresh the cached x_statsig_id via the fallback (PHP) strategy.

    On failure the previously cached value is kept and returned unchanged.
    """
    global _cached_x_statsig_id, _cached_x_statsig_id_method

    logger.info("强制刷新 x_statsig_id,使用备用策略", "StatsigStrategy")
    outcome = get_x_statsig_id_fallback()
    if not outcome['success']:
        logger.error("备用策略也失败,保持原有 x_statsig_id", "StatsigStrategy")
        return _cached_x_statsig_id

    _cached_x_statsig_id = outcome['x_statsig_id']
    _cached_x_statsig_id_method = 'php_interface'
    logger.info("成功使用备用策略刷新 x_statsig_id", "StatsigStrategy")
    return _cached_x_statsig_id
|
| 308 |
+
|
| 309 |
+
def get_default_headers(force_refresh_statsig=False):
    """Build the default browser-like headers for grok.com requests.

    Args:
        force_refresh_statsig: when True, refresh the x_statsig_id through the
            fallback strategy before building the headers; otherwise the
            cached id is used.

    Returns:
        A fresh dict of headers; X-Xai-Request-Id is a new UUID on every call.
    """
    statsig_id = (refresh_x_statsig_id_with_fallback()
                  if force_refresh_statsig
                  else get_cached_x_statsig_id())

    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Content-Type': 'text/plain;charset=UTF-8',
        'Connection': 'keep-alive',
        'Origin': 'https://grok.com',
        'Priority': 'u=1, i',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
        'Sec-Ch-Ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
        'Sec-Ch-Ua-Mobile': '?0',
        'Sec-Ch-Ua-Platform': '"macOS"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'X-Statsig-Id': statsig_id,
        'X-Xai-Request-Id': str(uuid.uuid4()),
        'Baggage': 'sentry-public_key=b311e0f2690c81f25e2c4cf6d4f7ce1c'
    }
    return headers
|
| 340 |
+
|
| 341 |
+
# Backward-compatibility alias: snapshots the headers (including one fixed
# X-Xai-Request-Id) at import time; prefer calling get_default_headers().
DEFAULT_HEADERS = get_default_headers()
|
| 343 |
|
| 344 |
class AuthTokenManager:
|
| 345 |
    def __init__(self):
        """Set up per-model token pools, expiry tracking and rate-limit config."""
        # model name -> list of token entries
        # ({token, RequestCount, AddedTime, StartCallTime})
        self.token_model_map = {}
        # (token, model, expiredTime-ms) tuples waiting to be reactivated
        self.expired_tokens = set()
        # sso -> model -> {isValid, invalidatedTime, totalRequestCount}
        self.token_status_map = {}

        # Per-model limits: max requests per token ("RequestFrequency") and
        # the cooldown in milliseconds before the counter resets.
        self.model_config = {
            "grok-4": {
                "RequestFrequency": 20,
                "ExpirationTime": 24 * 60 * 60 * 1000  # 24 hours
            },
            "grok-3": {
                "RequestFrequency": 30,
                "ExpirationTime": 2 * 60 * 60 * 1000  # 2 hours
            },
            "grok-3-deepsearch": {
                "RequestFrequency": 30,
                "ExpirationTime": 24 * 60 * 60 * 1000  # 24 hours
            },
            "grok-3-deepersearch": {
                "RequestFrequency": 30,
                "ExpirationTime": 24 * 60 * 60 * 1000  # 24 hours
            },
            "grok-3-reasoning": {
                "RequestFrequency": 30,
                "ExpirationTime": 24 * 60 * 60 * 1000  # 24 hours
            },
            "grok-4-heavy": {
                "RequestFrequency": 20,
                "ExpirationTime": 24 * 60 * 60 * 1000  # 24 hours
            }
        }
        self.token_reset_switch = False
        self.token_reset_timer = None
        self.load_token_status()  # restore persisted token status from disk
|
| 379 |
def save_token_status(self):
|
| 380 |
try:
|
| 381 |
with open(CONFIG["TOKEN_STATUS_FILE"], 'w', encoding='utf-8') as f:
|
|
|
|
| 393 |
logger.info("已从配置文件加载令牌状态", "TokenManager")
|
| 394 |
except Exception as error:
|
| 395 |
logger.error(f"加载令牌状态失败: {str(error)}", "TokenManager")
|
| 396 |
+
    def add_token(self, token,isinitialization=False):
        """Register a cookie-style token ("sso-rw=...;sso=...") for every model.

        Existing entries for the same token are left untouched. Unless this is
        startup initialization, the status map is persisted afterwards.
        """
        # The sso cookie value doubles as the token's identity key.
        sso = token.split("sso=")[1].split(";")[0]
        for model in self.model_config.keys():
            if model not in self.token_model_map:
                self.token_model_map[model] = []
            if sso not in self.token_status_map:
                self.token_status_map[sso] = {}

            # Skip duplicates: only append when this token isn't pooled yet.
            existing_token_entry = next((entry for entry in self.token_model_map[model] if entry["token"] == token), None)

            if not existing_token_entry:
                self.token_model_map[model].append({
                    "token": token,
                    "RequestCount": 0,
                    "AddedTime": int(time.time() * 1000),
                    "StartCallTime": None
                })

                if model not in self.token_status_map[sso]:
                    self.token_status_map[sso][model] = {
                        "isValid": True,
                        "invalidatedTime": None,
                        "totalRequestCount": 0
                    }
        if not isinitialization:
            self.save_token_status()
|
| 422 |
|
| 423 |
+
def set_token(self, token):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 424 |
models = list(self.model_config.keys())
|
| 425 |
self.token_model_map = {model: [{
|
| 426 |
+
"token": token,
|
|
|
|
| 427 |
"RequestCount": 0,
|
| 428 |
"AddedTime": int(time.time() * 1000),
|
| 429 |
+
"StartCallTime": None
|
|
|
|
| 430 |
}] for model in models}
|
| 431 |
|
| 432 |
+
sso = token.split("sso=")[1].split(";")[0]
|
| 433 |
self.token_status_map[sso] = {model: {
|
| 434 |
"isValid": True,
|
| 435 |
"invalidatedTime": None,
|
| 436 |
+
"totalRequestCount": 0
|
|
|
|
| 437 |
} for model in models}
|
| 438 |
|
| 439 |
def delete_token(self, token):
|
|
|
|
| 492 |
return None
|
| 493 |
|
| 494 |
token_entry = self.token_model_map[normalized_model][0]
|
|
|
|
| 495 |
if is_return:
|
| 496 |
return token_entry["token"]
|
| 497 |
|
| 498 |
if token_entry:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 499 |
if token_entry["StartCallTime"] is None:
|
| 500 |
token_entry["StartCallTime"] = int(time.time() * 1000)
|
| 501 |
|
|
|
|
| 505 |
|
| 506 |
token_entry["RequestCount"] += 1
|
| 507 |
|
| 508 |
+
if token_entry["RequestCount"] > self.model_config[normalized_model]["RequestFrequency"]:
|
| 509 |
self.remove_token_from_model(normalized_model, token_entry["token"])
|
| 510 |
next_token_entry = self.token_model_map[normalized_model][0] if self.token_model_map[normalized_model] else None
|
| 511 |
return next_token_entry["token"] if next_token_entry else None
|
| 512 |
|
| 513 |
sso = token_entry["token"].split("sso=")[1].split(";")[0]
|
|
|
|
| 514 |
if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
|
| 515 |
if token_entry["RequestCount"] == self.model_config[normalized_model]["RequestFrequency"]:
|
| 516 |
self.token_status_map[sso][normalized_model]["isValid"] = False
|
| 517 |
self.token_status_map[sso][normalized_model]["invalidatedTime"] = int(time.time() * 1000)
|
| 518 |
self.token_status_map[sso][normalized_model]["totalRequestCount"] += 1
|
| 519 |
|
|
|
|
|
|
|
| 520 |
self.save_token_status()
|
| 521 |
|
| 522 |
return token_entry["token"]
|
|
|
|
| 538 |
self.expired_tokens.add((
|
| 539 |
removed_token_entry["token"],
|
| 540 |
normalized_model,
|
| 541 |
+
int(time.time() * 1000)
|
|
|
|
| 542 |
))
|
| 543 |
|
| 544 |
if not self.token_reset_switch:
|
|
|
|
| 555 |
return list(self.expired_tokens)
|
| 556 |
|
| 557 |
def normalize_model_name(self, model):
|
| 558 |
+
if model.startswith('grok-') and 'deepsearch' not in model and 'reasoning' not in model:
|
| 559 |
return '-'.join(model.split('-')[:2])
|
| 560 |
return model
|
| 561 |
|
|
|
|
| 568 |
|
| 569 |
for model in self.model_config.keys():
|
| 570 |
model_tokens = self.token_model_map.get(model, [])
|
| 571 |
+
model_request_frequency = self.model_config[model]["RequestFrequency"]
|
| 572 |
+
|
| 573 |
total_used_requests = sum(token_entry.get("RequestCount", 0) for token_entry in model_tokens)
|
| 574 |
|
| 575 |
remaining_capacity = (len(model_tokens) * model_request_frequency) - total_used_requests
|
|
|
|
| 585 |
def reset_expired_tokens():
|
| 586 |
now = int(time.time() * 1000)
|
| 587 |
|
|
|
|
| 588 |
tokens_to_remove = set()
|
| 589 |
for token_info in self.expired_tokens:
|
| 590 |
+
token, model, expired_time = token_info
|
| 591 |
+
expiration_time = self.model_config[model]["ExpirationTime"]
|
|
|
|
|
|
|
| 592 |
|
| 593 |
if now - expired_time >= expiration_time:
|
| 594 |
if not any(entry["token"] == token for entry in self.token_model_map.get(model, [])):
|
|
|
|
| 597 |
|
| 598 |
self.token_model_map[model].append({
|
| 599 |
"token": token,
|
|
|
|
| 600 |
"RequestCount": 0,
|
| 601 |
"AddedTime": now,
|
| 602 |
+
"StartCallTime": None
|
|
|
|
| 603 |
})
|
| 604 |
|
| 605 |
sso = token.split("sso=")[1].split(";")[0]
|
|
|
|
| 607 |
self.token_status_map[sso][model]["isValid"] = True
|
| 608 |
self.token_status_map[sso][model]["invalidatedTime"] = None
|
| 609 |
self.token_status_map[sso][model]["totalRequestCount"] = 0
|
|
|
|
| 610 |
|
| 611 |
tokens_to_remove.add(token_info)
|
| 612 |
|
| 613 |
self.expired_tokens -= tokens_to_remove
|
| 614 |
|
| 615 |
+
for model in self.model_config.keys():
|
| 616 |
if model not in self.token_model_map:
|
| 617 |
continue
|
| 618 |
|
|
|
|
| 620 |
if not token_entry.get("StartCallTime"):
|
| 621 |
continue
|
| 622 |
|
| 623 |
+
expiration_time = self.model_config[model]["ExpirationTime"]
|
| 624 |
if now - token_entry["StartCallTime"] >= expiration_time:
|
| 625 |
sso = token_entry["token"].split("sso=")[1].split(";")[0]
|
| 626 |
if sso in self.token_status_map and model in self.token_status_map[sso]:
|
| 627 |
self.token_status_map[sso][model]["isValid"] = True
|
| 628 |
self.token_status_map[sso][model]["invalidatedTime"] = None
|
| 629 |
self.token_status_map[sso][model]["totalRequestCount"] = 0
|
|
|
|
| 630 |
|
| 631 |
token_entry["RequestCount"] = 0
|
| 632 |
token_entry["StartCallTime"] = None
|
|
|
|
| 660 |
    def get_token_status_map(self):
        """Return the sso -> model -> validity/usage status mapping."""
        return self.token_status_map
|
| 662 |
|
| 663 |
+
def smart_grok_request_with_fallback(request_func, *args, **kwargs):
    """
    Execute a Grok API request with an x_statsig_id fallback-retry mechanism.

    Args:
        request_func: the callable that performs the actual HTTP request
        *args: positional arguments forwarded to request_func
        **kwargs: keyword arguments forwarded to request_func

    Returns:
        The response from request_func. A non-2xx response from the final
        attempt is returned as-is; an exception from the final attempt is
        re-raised.
    """
    max_retries = 2  # at most 2 tries: primary strategy once + fallback once

    for attempt in range(max_retries):
        try:
            # First attempt uses the current x_statsig_id (possibly locally generated).
            if attempt == 0:
                logger.info("使用主要策略发起 Grok API 请求", "SmartRequest")
                response = request_func(*args, **kwargs)
            else:
                # Second attempt: force-refresh the x_statsig_id via the
                # fallback (PHP endpoint) strategy.
                logger.warning("主要策略失败,使用备用策略重新发起 Grok API 请求", "SmartRequest")

                # Update (or create) the headers kwarg with refreshed defaults;
                # update() preserves extra keys such as Cookie.
                if 'headers' in kwargs:
                    kwargs['headers'].update(get_default_headers(force_refresh_statsig=True))
                else:
                    kwargs['headers'] = get_default_headers(force_refresh_statsig=True)

                response = request_func(*args, **kwargs)

            # Inspect the HTTP status code when the response exposes one.
            if hasattr(response, 'status_code'):
                status_code = response.status_code

                # Success: return immediately.
                if 200 <= status_code < 300:
                    if attempt > 0:
                        logger.info(f"备用策略成功:Grok API 请求成功 (状态码: {status_code})", "SmartRequest")
                    else:
                        logger.info(f"主要策略成功:Grok API 请求成功 (状态码: {status_code})", "SmartRequest")
                    return response

                # 4xx/5xx with retries remaining: fall through to the fallback attempt.
                elif (400 <= status_code < 600) and attempt < max_retries - 1:
                    logger.warning(f"Grok API 请求失败 (状态码: {status_code}),尝试使用备用策略", "SmartRequest")
                    continue
                else:
                    # Final attempt also failed — hand the response back to the caller.
                    logger.error(f"所有策略都失败:Grok API 请求失败 (状态码: {status_code})", "SmartRequest")
                    return response
            else:
                # No status_code attribute: return the response untouched.
                return response

        except Exception as e:
            if attempt < max_retries - 1:
                logger.warning(f"Grok API 请求异常: {e},尝试使用备用策略", "SmartRequest")
                continue
            else:
                logger.error(f"所有策略都失败:Grok API 请求异常: {e}", "SmartRequest")
                raise

    # Unreachable in practice (every loop path returns, continues, or raises).
    return None
|
| 729 |
+
|
| 730 |
class Utils:
|
| 731 |
@staticmethod
|
| 732 |
def organize_search_results(search_results):
|
|
|
|
| 741 |
url = result.get('url', '#')
|
| 742 |
preview = result.get('preview', '无预览内容')
|
| 743 |
|
| 744 |
+
formatted_result = f"\r\n<details><summary>资料[{index}]: {title}</summary>\r\n{preview}\r\n\n[{title}]({url})\r\n</details>\n"
|
| 745 |
formatted_results.append(formatted_result)
|
| 746 |
|
| 747 |
return '\n\n'.join(formatted_results)
|
| 748 |
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
@staticmethod
|
| 752 |
+
def safe_filter_grok_tags(text):
|
| 753 |
+
"""
|
| 754 |
+
只移除 <xai:tool_usage_card> 标签
|
| 755 |
+
"""
|
| 756 |
+
if not text or not isinstance(text, str):
|
| 757 |
+
return text
|
| 758 |
+
|
| 759 |
+
start_tag, end_tag = ("<xai:tool_usage_card>", "</xai:tool_usage_card>")
|
| 760 |
+
|
| 761 |
+
if start_tag in text:
|
| 762 |
+
original_text = text
|
| 763 |
+
while True:
|
| 764 |
+
end_index = text.rfind(end_tag)
|
| 765 |
+
if end_index == -1:
|
| 766 |
+
break
|
| 767 |
+
|
| 768 |
+
start_index = text.rfind(start_tag, 0, end_index)
|
| 769 |
+
if start_index == -1:
|
| 770 |
+
break
|
| 771 |
+
|
| 772 |
+
text = text[:start_index] + text[end_index + len(end_tag):]
|
| 773 |
+
|
| 774 |
+
if text != original_text:
|
| 775 |
+
return text.strip()
|
| 776 |
+
else:
|
| 777 |
+
return text
|
| 778 |
+
else:
|
| 779 |
+
return text
|
| 780 |
+
|
| 781 |
    @staticmethod
    def create_auth_headers(model, is_return=False):
        """Fetch the next auth token (cookie string) for *model*.

        Delegates to the global token manager; with is_return=True the token
        is returned without advancing the per-token request counters.
        """
        return token_manager.get_next_token_for_model(model, is_return)
|
|
|
|
| 838 |
}
|
| 839 |
|
| 840 |
logger.info("发送文字文件请求", "Server")
|
| 841 |
+
cookie = f"{Utils.create_auth_headers(model, True)};{CONFIG['SERVER']['CF_CLEARANCE']}"
|
| 842 |
proxy_options = Utils.get_proxy_options()
|
| 843 |
+
|
| 844 |
+
# 使用智能重试机制发起文件上传请求
|
| 845 |
+
def make_upload_request(**request_kwargs):
|
| 846 |
+
return curl_requests.post(
|
| 847 |
+
"https://grok.com/rest/app-chat/upload-file",
|
| 848 |
+
json=upload_data,
|
| 849 |
+
impersonate="chrome133a",
|
| 850 |
+
**request_kwargs
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
response = smart_grok_request_with_fallback(
|
| 854 |
+
make_upload_request,
|
| 855 |
headers={
|
| 856 |
+
**get_default_headers(),
|
| 857 |
+
"Cookie": cookie
|
| 858 |
},
|
|
|
|
|
|
|
| 859 |
**proxy_options
|
| 860 |
)
|
| 861 |
|
|
|
|
| 893 |
logger.info("发送图片请求", "Server")
|
| 894 |
|
| 895 |
proxy_options = Utils.get_proxy_options()
|
| 896 |
+
|
| 897 |
+
# 使用智能重试机制发起图片上传请求
|
| 898 |
+
def make_image_upload_request(**request_kwargs):
|
| 899 |
+
return curl_requests.post(
|
| 900 |
+
url,
|
| 901 |
+
json=upload_data,
|
| 902 |
+
impersonate="chrome133a",
|
| 903 |
+
**request_kwargs
|
| 904 |
+
)
|
| 905 |
+
|
| 906 |
+
response = smart_grok_request_with_fallback(
|
| 907 |
+
make_image_upload_request,
|
| 908 |
headers={
|
| 909 |
+
**get_default_headers(),
|
| 910 |
+
"Cookie": CONFIG["SERVER"]['COOKIE']
|
| 911 |
},
|
|
|
|
|
|
|
| 912 |
**proxy_options
|
| 913 |
)
|
| 914 |
|
|
|
|
| 963 |
message_length = 0
|
| 964 |
convert_to_file = False
|
| 965 |
last_message_content = ''
|
| 966 |
+
search = request["model"] in ['grok-4-search', 'grok-3-search']
|
| 967 |
deepsearchPreset = ''
|
| 968 |
if request["model"] == 'grok-3-deepsearch':
|
| 969 |
deepsearchPreset = 'default'
|
| 970 |
elif request["model"] == 'grok-3-deepersearch':
|
| 971 |
deepsearchPreset = 'deeper'
|
| 972 |
|
|
|
|
| 973 |
def remove_think_tags(text):
|
| 974 |
import re
|
| 975 |
text = re.sub(r'<think>[\s\S]*?<\/think>', '', text).strip()
|
|
|
|
| 1105 |
}
|
| 1106 |
|
| 1107 |
def process_model_response(response, model):
    """
    Process one streaming response chunk from the Grok API.

    This version fixes duplicated output for non-Agent models (e.g. grok-3)
    by ignoring the final modelResponse chunk for them; all Agent-model
    handling is kept in its original form.

    Returns a dict with optional keys: token (text), type ('content',
    'thinking', 'heartbeat' or 'image_url') and imageUrl.
    """
    result = {"token": None, "type": None}
    AGENT_MODELS = ['grok-4-heavy', 'grok-4', 'grok-3-deepersearch', 'grok-3-deepsearch']

    # Image-generation mode: surface the cached image URL once.
    if CONFIG["IS_IMG_GEN"]:
        if response.get("cachedImageGenerationResponse") and not CONFIG["IS_IMG_GEN2"]:
            result["imageUrl"] = response["cachedImageGenerationResponse"]["imageUrl"]
            result["type"] = 'image_url'
        return result

    message_tag = response.get("messageTag")
    token = response.get("token")

    if message_tag == 'heartbeat':
        result["type"] = 'heartbeat'
        return result

    # Non-Agent models: drop the final modelResponse chunk (duplicate-output fix).
    if model not in AGENT_MODELS and response.get("modelResponse"):
        return result

    # Agent models: emit the final aggregated message as content.
    if response.get("modelResponse") and isinstance(response["modelResponse"], dict):
        final_message = response["modelResponse"].get("message")
        if final_message:
            result["token"] = Utils.safe_filter_grok_tags(final_message)
            result["type"] = 'content'
        return result

    # Agent models: tagged chunks (headers, summaries, tool results, cited
    # search results) become 'thinking' output.
    if model in AGENT_MODELS:
        THINKING_TAGS = {'header', 'summary', 'raw_function_result', 'citedWebSearchResults'}
        if message_tag in THINKING_TAGS:
            content_to_filter = None
            if token: content_to_filter = token
            elif response.get('webSearchResults') and CONFIG["ISSHOW_SEARCH_RESULTS"]:
                content_to_filter = Utils.organize_search_results(response['webSearchResults'])

            if content_to_filter:
                filtered_content = Utils.safe_filter_grok_tags(content_to_filter)
                if filtered_content:
                    result["token"] = filtered_content
                    result["type"] = 'thinking'
            return result

    # Agent models: verbose intermediate thinking — either suppressed to a
    # heartbeat (SHOW_THINKING off) or emitted as 'thinking' text.
    if model in AGENT_MODELS:
        is_verbose_thinking = (response.get("isThinking") or response.get("messageStepId")) and message_tag not in {'header', 'summary'}
        if is_verbose_thinking:
            if not CONFIG["SHOW_THINKING"]:
                result["type"] = 'heartbeat'
                return result
            elif token:
                filtered_token = Utils.safe_filter_grok_tags(token)
                if filtered_token:
                    result["token"] = filtered_token
                    result["type"] = 'thinking'
            return result

    # Non-Agent models: plain incremental tokens are regular content.
    if model not in AGENT_MODELS and token:
        result["token"] = Utils.safe_filter_grok_tags(token)
        result["type"] = 'content'
        return result

    return result
|
|
|
|
| 1174 |
def handle_image_response(image_url):
|
| 1175 |
max_retries = 2
|
| 1176 |
retry_count = 0
|
|
|
|
| 1179 |
while retry_count < max_retries:
|
| 1180 |
try:
|
| 1181 |
proxy_options = Utils.get_proxy_options()
|
| 1182 |
+
|
| 1183 |
+
# 使用智能重试机制发起图片下载请求
|
| 1184 |
+
def make_image_download_request(**request_kwargs):
|
| 1185 |
+
return curl_requests.get(
|
| 1186 |
+
f"https://assets.grok.com/{image_url}",
|
| 1187 |
+
impersonate="chrome133a",
|
| 1188 |
+
**request_kwargs
|
| 1189 |
+
)
|
| 1190 |
+
|
| 1191 |
+
image_base64_response = smart_grok_request_with_fallback(
|
| 1192 |
+
make_image_download_request,
|
| 1193 |
headers={
|
| 1194 |
+
**get_default_headers(),
|
| 1195 |
+
"Cookie": CONFIG["SERVER"]['COOKIE']
|
| 1196 |
},
|
|
|
|
| 1197 |
**proxy_options
|
| 1198 |
)
|
| 1199 |
|
|
|
|
| 1313 |
except Exception as error:
|
| 1314 |
logger.error(str(error), "Server")
|
| 1315 |
raise
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1316 |
|
| 1317 |
+
def handle_stream_response(response, model):
    """Turn a streaming Grok HTTP response into an SSE generator.

    Agent models get <think>...</think> wrapping around intermediate
    reasoning; standard models get plain content plus optional image
    handling. Returns a generator yielding SSE-formatted strings.
    """
    AGENT_MODELS = ['grok-4-heavy', 'grok-4', 'grok-3-deepersearch', 'grok-3-deepsearch']

    # ================= Branch A: Agent-model-specific handling =================
    if model in AGENT_MODELS:
        def generate_agent():
            logger.info(f"使用 Agent 模型专用逻辑处理: {model}", "Server")
            stream = response.iter_lines()

            is_in_think_block = False
            final_answer_started = False

            # Helper: emit content, opening/closing the <think> block as the
            # content type flips between 'thinking' and 'content'.
            def yield_agent_content(content_to_yield, content_type='content'):
                nonlocal is_in_think_block
                is_thinking_content = content_type == 'thinking'

                if is_thinking_content and not is_in_think_block:
                    is_in_think_block = True
                    payload = MessageProcessor.create_chat_response('<think>\n', model, True)
                    yield f"data: {json.dumps(payload)}\n\n"

                elif not is_thinking_content and is_in_think_block:
                    is_in_think_block = False
                    payload = MessageProcessor.create_chat_response('</think>\n\n', model, True)
                    json_payload = json.dumps(payload)
                    yield f"data: {json_payload}\n\n"

                if content_to_yield:
                    payload = MessageProcessor.create_chat_response(content_to_yield, model, True)
                    yield f"data: {json.dumps(payload)}\n\n"

            for chunk in stream:
                if not chunk: continue
                try:
                    line_json = json.loads(chunk.decode("utf-8").strip())
                    if line_json.get("error"): continue

                    response_data = line_json.get("result", {}).get("response")
                    if not response_data: continue

                    # Final aggregated answer: close any open think block,
                    # emit the cleaned message and stop consuming the stream.
                    if response_data.get("modelResponse") and isinstance(response_data["modelResponse"], dict):
                        final_answer_started = True
                        for part in yield_agent_content(None, content_type='content'):
                            yield part

                        final_message = response_data["modelResponse"].get("message")
                        if final_message:
                            clean_message = Utils.safe_filter_grok_tags(final_message)
                            payload = MessageProcessor.create_chat_response(clean_message, model, True)
                            yield f"data: {json.dumps(payload)}\n\n"
                        break

                    result = process_model_response(response_data, model)
                    if result.get("type") == 'heartbeat':
                        yield ":ping\n\n"
                    elif result.get("type") == 'thinking':
                        for part in yield_agent_content(result.get("token"), content_type='thinking'):
                            yield part

                except Exception as e:
                    logger.error(f"处理 Agent 流时出错: {str(e)}", "Server")
                    continue

            # Stream ended mid-think without a final answer: close the block.
            if is_in_think_block and not final_answer_started:
                payload = MessageProcessor.create_chat_response('</think>\n\n', model, True)
                json_payload = json.dumps(payload)
                yield f"data: {json_payload}\n\n"

            yield "data: [DONE]\n\n"
        return generate_agent()

    # ================= Branch B: standard-model handling =================
    else:
        def generate_standard_fixed():
            logger.info(f"使用标准模型逻辑处理 (精准模仿作者图片处理): {model}", "Server")
            stream = response.iter_lines()

            # Reset image-generation flags for this stream.
            CONFIG["IS_IMG_GEN"] = False
            CONFIG["IS_IMG_GEN2"] = False

            for chunk in stream:
                if not chunk: continue
                try:
                    line_json = json.loads(chunk.decode("utf-8").strip())
                    if line_json.get("error"): continue

                    response_data = line_json.get("result", {}).get("response")
                    if not response_data: continue

                    if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
                        CONFIG["IS_IMG_GEN"] = True

                    result = process_model_response(response_data, model)

                    if result.get("token"):
                        payload = MessageProcessor.create_chat_response(result["token"], model, True)
                        yield f"data: {json.dumps(payload)}\n\n"

                    if result.get("imageUrl"):
                        CONFIG["IS_IMG_GEN2"] = True
                        image_data = handle_image_response(result["imageUrl"])
                        payload = MessageProcessor.create_chat_response(image_data, model, True)
                        yield f"data: {json.dumps(payload)}\n\n"

                except Exception as e:
                    logger.error(f"处理标准流时出错: {str(e)}", "Server")
                    continue

            yield "data: [DONE]\n\n"

        return generate_standard_fixed()
|
| 1431 |
def initialization():
    """Load SSO tokens from the SSO env var and prime the token manager."""
    # SSO is a comma-separated list of sso cookie values.
    sso_array = os.environ.get("SSO", "").split(',')
    logger.info("开始加载令牌", "Server")
    token_manager.load_token_status()
    for sso in sso_array:
        if sso:
            # Build the full cookie string; True marks this as initialization
            # so per-token persistence is deferred to the single save below.
            token_manager.add_token(f"sso-rw={sso};sso={sso}",True)
    token_manager.save_token_status()

    logger.info(f"成功加载令牌: {json.dumps(token_manager.get_all_tokens(), indent=2)}", "Server")
    logger.info(f"令牌加载完成,共加载: {len(token_manager.get_all_tokens())}个令牌", "Server")

    if CONFIG["API"]["PROXY"]:
        logger.info(f"代理已设置: {CONFIG['API']['PROXY']}", "Server")

    logger.info("初始化完成", "Server")
|
| 1447 |
|
| 1448 |
|
| 1449 |
# Flask application instance; the route handlers below register on it.
app = Flask(__name__)
|
|
|
|
| 1487 |
sso = request.json.get('sso')
|
| 1488 |
if not sso:
|
| 1489 |
return jsonify({"error": "SSO token is required"}), 400
|
| 1490 |
+
token_manager.add_token(f"sso-rw={sso};sso={sso}")
|
| 1491 |
return jsonify({"success": True})
|
| 1492 |
except Exception as e:
|
| 1493 |
return jsonify({"error": str(e)}), 500
|
|
|
|
| 1538 |
|
| 1539 |
try:
|
| 1540 |
sso = request.json.get('sso')
|
| 1541 |
+
token_manager.add_token(f"sso-rw={sso};sso={sso}")
|
| 1542 |
return jsonify(token_manager.get_token_status_map().get(sso, {})), 200
|
| 1543 |
except Exception as error:
|
| 1544 |
logger.error(str(error), "Server")
|
|
|
|
| 1610 |
retry_count = 0
|
| 1611 |
grok_client = GrokApiClient(model)
|
| 1612 |
request_payload = grok_client.prepare_chat_request(data)
|
|
|
|
| 1613 |
logger.info(json.dumps(request_payload,indent=2))
|
| 1614 |
|
| 1615 |
while retry_count < CONFIG["RETRY"]["MAX_ATTEMPTS"]:
|
|
|
|
| 1631 |
logger.info(json.dumps(request_payload,indent=2),"Server")
|
| 1632 |
try:
|
| 1633 |
proxy_options = Utils.get_proxy_options()
|
| 1634 |
+
|
| 1635 |
+
# 使用智能重试机制发起请求
|
| 1636 |
+
def make_grok_request(**request_kwargs):
|
| 1637 |
+
return curl_requests.post(
|
| 1638 |
+
f"{CONFIG['API']['BASE_URL']}/rest/app-chat/conversations/new",
|
| 1639 |
+
data=json.dumps(request_payload),
|
| 1640 |
+
impersonate="chrome133a",
|
| 1641 |
+
stream=True,
|
| 1642 |
+
**request_kwargs
|
| 1643 |
+
)
|
| 1644 |
+
|
| 1645 |
+
response = smart_grok_request_with_fallback(
|
| 1646 |
+
make_grok_request,
|
| 1647 |
headers={
|
| 1648 |
+
**get_default_headers(),
|
| 1649 |
+
"Cookie": CONFIG["SERVER"]['COOKIE']
|
| 1650 |
},
|
| 1651 |
+
**proxy_options
|
| 1652 |
+
)
|
|
|
|
|
|
|
| 1653 |
logger.info(CONFIG["SERVER"]['COOKIE'],"Server")
|
| 1654 |
if response.status_code == 200:
|
| 1655 |
response_status_code = 200
|
|
|
|
| 1733 |
host='0.0.0.0',
|
| 1734 |
port=CONFIG["SERVER"]["PORT"],
|
| 1735 |
debug=False
|
| 1736 |
+
)
|