Spaces:
Sleeping
Sleeping
import asyncio
import json
import time
import uuid
from typing import Any, Dict, List, Optional

import httpx
from fastapi import FastAPI, HTTPException, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
# Create the FastAPI application
app = FastAPI()

# Configure CORS: wide open to any origin/method/header.
# NOTE(review): acceptable for a local/dev proxy; consider restricting
# allow_origins before exposing this publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Data models
class Message(BaseModel):
    """A single chat message in the OpenAI chat format."""
    role: str     # e.g. "system", "user", "assistant" (not validated here)
    content: str  # message text
class ChatRequest(BaseModel):
    """Incoming request body for chat completions (OpenAI-compatible)."""
    messages: List[Message]        # conversation history; last entry is the current message
    model: str                     # public model name; see MODEL_MAPPING
    stream: Optional[bool] = True  # NOTE(review): visible code always streams regardless of this flag
class ChatResponse(BaseModel):
    """OpenAI-style non-streaming chat completion response.

    NOTE(review): not referenced anywhere in the visible code (responses
    are emitted as raw streaming chunks) — appears to document the schema.
    """
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Dict[str, Any]]
    usage: Optional[Dict[str, int]] = None
# Model mapping: translate the OpenAI-compatible names this proxy exposes
# into the internal Abacus.ai LLM identifiers.
MODEL_MAPPING = {
    "gpt-4o-mini-abacus": "OPENAI_GPT4O_MINI",
    "claude-3.5-sonnet-abacus": "CLAUDE_V3_5_SONNET",
    "claude-3.7-sonnet-abacus": "CLAUDE_V3_7_SONNET",
    "claude-3.7-sonnet-thinking-abacus": "CLAUDE_V3_7_SONNET_THINKING",
    "o3-mini-abacus": "OPENAI_O3_MINI",
    "o3-mini-high-abacus": "OPENAI_O3_MINI_HIGH",
    "o1-mini-abacus": "OPENAI_O1_MINI",
    "deepseek-r1-abacus": "DEEPSEEK_R1",
    "gemini-2-pro-abacus": "GEMINI_2_PRO",
    "gemini-2-flash-thinking-abacus": "GEMINI_2_FLASH_THINKING",
    "gemini-2-flash-abacus": "GEMINI_2_FLASH",
    "gemini-1.5-pro-abacus": "GEMINI_1_5_PRO",
    "xai-grok-abacus": "XAI_GROK",
    "deepseek-v3-abacus": "DEEPSEEK_V3",
    "llama3-1-405b-abacus": "LLAMA3_1_405B",
    "gpt-4o-abacus": "OPENAI_GPT4O",
    "gpt-4o-2024-08-06-abacus": "OPENAI_GPT4O",
    "gpt-3.5-turbo-abacus": "OPENAI_O3_MINI",
    "gpt-3.5-turbo-16k-abacus": "OPENAI_O3_MINI_HIGH",
}

BASE_URL = "https://pa002.abacus.ai"
TIMEOUT = 30.0   # per-request timeout, seconds
MAX_RETRIES = 1  # maximum attempts for the upstream request
RETRY_DELAY = 1  # delay between retries, seconds


async def list_models():
    """Return the supported models in OpenAI ``/v1/models`` list format."""
    return {
        "object": "list",
        "data": [
            {
                "id": name,
                "object": "model",
                "created": 1677610602,
                "owned_by": "system",
            }
            for name in MODEL_MAPPING
        ],
    }
# Helper: build request headers
def get_headers(auth_token: str) -> Dict[str, str]:
    """Build the browser-mimicking headers for Abacus.ai requests.

    ``auth_token`` is used verbatim as the ``cookie`` header value.
    Callers set ``referer`` themselves because it embeds per-conversation
    identifiers.  The sentry/baggage values are fixed strings captured
    from a real browser session.
    """
    # FIX: removed the large block of dead, commented-out alternative
    # headers that previously followed this function.
    return {
        "authority": "apps.abacus.ai",
        "method": "POST",
        "path": "/api/_chatLLMSendMessageSSE",
        "scheme": "https",
        "accept": "text/event-stream",
        "accept-encoding": "gzip, deflate, br, zstd",
        "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6,ja;q=0.5",
        "baggage": "sentry-environment=production,sentry-release=0bfa96da0a4b582e2267a59e9b274f5e47b5d80b,sentry-public_key=3476ea6df1585dd10e92cdae3a66ff49,sentry-trace_id=986212bc735b4c4ab303957fb581a822",
        "content-type": "text/plain;charset=UTF-8",
        "cookie": auth_token,
        "origin": "https://apps.abacus.ai",
        "priority": "u=1, i",
        "sec-ch-ua": '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "Windows",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "sentry-trace": "986212bc735b4c4ab303957fb581a822-9975e9d4ccb80d92",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
        "x-abacus-org-host": "apps",
    }
def process_messages(messages: List[Message]) -> str:
    """Flatten an OpenAI-style message list into one prompt string.

    The last message is treated as the current message; an optional
    system message is prefixed as ``System: ...``, and any earlier
    non-system messages are folded in as a plain-text transcript
    (the upstream API accepts a single message per request).

    Returns an empty string for an empty message list.
    """
    if not messages:  # FIX: guard against IndexError on messages[-1]
        return ""
    system_message = next((msg.content for msg in messages if msg.role == "system"), None)
    context_messages = [msg for msg in messages if msg.role != "system"][:-1]
    current_message = messages[-1].content

    full_message = current_message
    if system_message:
        full_message = f"System: {system_message}\n\n{full_message}"
    if context_messages:
        context_str = "\n".join(f"{msg.role}: {msg.content}" for msg in context_messages)
        full_message = f"Previous conversation:\n{context_str}\nCurrent message: {full_message}"
    return full_message
async def chat_completions(request: Request, chat_request: ChatRequest):
    """Proxy an OpenAI-style chat completion to Abacus.ai.

    Creates a fresh deployment conversation, forwards the flattened
    message history to the Abacus SSE endpoint, and re-emits each SSE
    event as an OpenAI ``chat.completion.chunk``.  Model "thinking"
    sections are wrapped in ``<think>``/``</think>`` markers.

    Returns 401 when no Bearer token is supplied; otherwise a
    ``StreamingResponse`` with ``text/event-stream`` media type.
    """
    # Require a Bearer token in the Authorization header.
    auth_header = request.headers.get("Authorization", "")
    if not auth_header.startswith("Bearer "):
        return Response(
            content=json.dumps({"error": "未提供有效的Authorization header"}),
            status_code=401,
        )
    # FIX: forward the caller's token as the Abacus session cookie.  The
    # previous code ignored it and used a session cookie hard-coded in
    # source — a leaked credential shared by every caller.
    auth_token = auth_header.replace("Bearer ", "")

    # Fixed id of the Abacus "external application" this proxy talks to.
    external_id = "f6ca7aa14"

    # Collapse the message history into a single prompt string.
    full_message = process_messages(chat_request.messages)

    new_chat_data = {
        "deploymentId": "47845a764",
        "name": "New Chat",
        "externalApplicationId": external_id,
    }

    def make_chunk(delta: Dict[str, Any], finish_reason: Optional[str] = None) -> Dict[str, Any]:
        """Build one OpenAI streaming chunk carrying *delta*."""
        choice: Dict[str, Any] = {"delta": delta, "index": 0}
        if finish_reason is not None:
            choice["finish_reason"] = finish_reason
        return {
            "id": str(uuid.uuid4()),
            "object": "chat.completion.chunk",
            # FIX: "created" must be a Unix timestamp; the previous
            # int(uuid.uuid1().time_low) was not a valid epoch time.
            "created": int(time.time()),
            "model": chat_request.model,
            "choices": [choice],
        }

    async def generate_stream():
        headers_new_request = get_headers(auth_token)
        headers_new_request["referer"] = "https://apps.abacus.ai/chatllm/?appId=" + external_id
        for retry in range(MAX_RETRIES):
            try:
                # Create a fresh conversation for this request.
                # NOTE(review): httpx.post here is synchronous and blocks
                # the event loop; an AsyncClient call would be preferable.
                response = httpx.post(
                    "https://apps.abacus.ai/api/createDeploymentConversation",
                    data=json.dumps(new_chat_data),
                    headers=headers_new_request,
                )
                conversation_id = response.json()["result"]["deploymentConversationId"]

                request_data = {
                    "requestId": str(uuid.uuid4()),
                    "deploymentConversationId": conversation_id,
                    "message": full_message,
                    "isDesktop": True,
                    "chatConfig": {
                        "timezone": "America/Los_Angeles",
                        "language": "en-US",
                    },
                    # Translate the public model name to the Abacus LLM
                    # name; unknown names pass through unchanged.
                    "llmName": MODEL_MAPPING.get(chat_request.model, chat_request.model),
                    "externalApplicationId": external_id,
                }
                headers = get_headers(auth_token)
                headers["referer"] = (
                    "https://apps.abacus.ai/chatllm/?appId=" + external_id
                    + "&convoId=" + conversation_id
                )
                async with httpx.AsyncClient() as client:
                    async with client.stream(
                        "POST",
                        f"{BASE_URL}/api/_chatLLMSendMessageSSE",
                        headers=headers,
                        content=json.dumps(request_data),
                        timeout=TIMEOUT,
                    ) as response:
                        thinking_started = False
                        think_id = ""  # FIX: defined before first use in the "text" branch
                        async for line in response.aiter_lines():
                            if not line.strip():
                                continue
                            try:
                                data = json.loads(line)
                            except json.JSONDecodeError:
                                continue  # skip keep-alive / non-JSON lines
                            if data.get("type") == "collapsible_component":
                                # Start of a model "thinking" section.
                                thinking_started = True
                                think_id = data.get("messageId", "")
                                chunk = make_chunk({"role": "assistant", "content": "<think>"})
                                yield f"data: {json.dumps(chunk)}\n\n"
                            if data.get("type") == "text" and data.get("title") != "Thinking...":
                                if thinking_started and data.get("messageId", "") != think_id:
                                    # First regular text after thinking: close the tag.
                                    thinking_started = False
                                    content = "</think>" + data.get("segment", "")
                                else:
                                    content = data.get("segment", "")
                                chunk = make_chunk({"role": "assistant", "content": content})
                                yield f"data: {json.dumps(chunk)}\n\n"
                            if data.get("end"):
                                # Emit the terminating chunk and the SSE sentinel.
                                chunk = make_chunk({"content": ""}, finish_reason="stop")
                                yield f"data: {json.dumps(chunk)}\n\n"
                                yield "data: [DONE]\n\n"
                                break
                # FIX: the original break only exited the SSE line loop, so
                # with MAX_RETRIES > 1 a completed stream would be retried.
                break  # success — leave the retry loop
            except (httpx.TimeoutException, httpx.RequestError) as e:
                if retry == MAX_RETRIES - 1:  # final attempt failed
                    yield f"data: {json.dumps({'error': str(e)})}\n\n"
                    yield "data: [DONE]\n\n"
                    return
                await asyncio.sleep(RETRY_DELAY)

    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
    )
async def health_check():
    """Liveness probe: report service status and version."""
    return dict(status="ok", version="1.0.0")
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: serialize any unhandled exception as a JSON 500."""
    payload = {
        "error": {
            "message": str(exc),
            "type": type(exc).__name__,
            "code": 500,
        }
    }
    return Response(
        content=json.dumps(payload),
        status_code=500,
        media_type="application/json",
    )
| if __name__ == "__main__": | |
| import uvicorn | |
| uvicorn.run(app, host="0.0.0.0", port=8000) | |