import time
import json
import uuid
import uvicorn
from fastapi import FastAPI, HTTPException, Depends
from fastapi.concurrency import run_in_threadpool
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from typing import List, Optional

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


app = FastAPI(
    title="SAI-ChatBot OpenAI-Compatible API",
    description="Uses Selenium automation to interact with SAI-ChatBot in the background and returns results in the OpenAI API format.",
    version="1.2.0-final",
)
auth_scheme = HTTPBearer()


def api_key_auth(credentials: HTTPAuthorizationCredentials = Depends(auth_scheme)):
    # HTTPBearer (auto_error=True) already rejects requests without an
    # Authorization header, so this guard is defensive; any bearer token
    # is accepted and passed through unchanged.
    if not credentials:
        raise HTTPException(status_code=401, detail="Not authenticated")
    return credentials.credentials


class ChatMessage(BaseModel):
    role: str
    content: str


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    stream: Optional[bool] = False
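
# For reference, a request body these models accept (the standard OpenAI
# chat-completions shape; "sai-chatbot-l6" is the model name this proxy
# reports back, but the field itself is not validated):
#
#   {
#       "model": "sai-chatbot-l6",
#       "messages": [{"role": "user", "content": "你好"}],
#       "stream": true
#   }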


def get_sai_response(prompt_text: str):
    """Drive a headless Chromium session against sai.coludai.cn and yield
    the assistant's reply incrementally as it renders on the page."""
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    options.add_argument("--disable-gpu")
    options.binary_location = "/usr/bin/chromium"

    service = ChromeService(executable_path="/usr/bin/chromedriver")
    driver = None
    try:
        driver = webdriver.Chrome(service=service, options=options)
        driver.get("https://sai.coludai.cn/")

        wait = WebDriverWait(driver, 20)
        # The placeholder must match the live page verbatim, so it stays in Chinese.
        textarea_selector = 'textarea[placeholder="随时与未来对话,探索无限可能...."]'
        textarea = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, textarea_selector)))

        textarea.send_keys(prompt_text)
        textarea.send_keys(Keys.RETURN)

        # Wait for the newest assistant message bubble to appear.
        last_assistant_selector = "(.//div[@class='message-item' and @type='assistant'])[last()]"
        wait.until(EC.presence_of_element_located((By.XPATH, last_assistant_selector)))
        last_response_element = driver.find_element(By.XPATH, last_assistant_selector)

        previous_text = ""
        max_wait_time = 120
        start_time = time.time()

        # Poll the rendered markdown and yield only the newly appended suffix.
        while time.time() - start_time < max_wait_time:
            try:
                markdown_body = last_response_element.find_element(By.CSS_SELECTOR, ".markdown-body")
                current_text = markdown_body.text
                if current_text != previous_text:
                    new_text_chunk = current_text[len(previous_text):]
                    yield new_text_chunk
                    previous_text = current_text

                time.sleep(1)
                # If the text has stopped growing and is non-empty, treat the reply as complete.
                final_text_check = markdown_body.text
                if final_text_check == previous_text and final_text_check != "":
                    break
            except Exception:
                # The DOM may re-render mid-read; back off briefly and retry.
                time.sleep(0.5)
    except Exception as e:
        error_message = f"A fatal error occurred during automation: {e}\n\nSee the Hugging Face Space logs for details."
        yield error_message
    finally:
        if driver:
            driver.quit()
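
# A minimal local smoke test for the scraper, assuming chromium and
# chromedriver exist at the paths configured above (illustrative only,
# not part of the API surface):
#
#   for chunk in get_sai_response("你好"):
#       print(chunk, end="", flush=True)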


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest, _: str = Depends(api_key_auth)):
    last_user_message = next((msg.content for msg in reversed(request.messages) if msg.role == "user"), None)
    if not last_user_message:
        raise HTTPException(status_code=400, detail="No user message found")

    response_id = f"chatcmpl-{uuid.uuid4()}"
    created_timestamp = int(time.time())

    if request.stream:
        # A plain (sync) generator: Starlette iterates sync iterables in a
        # thread pool, so the blocking Selenium calls don't stall the event loop.
        def stream_generator():
            for chunk in get_sai_response(last_user_message):
                if not chunk:
                    continue
                response_chunk = {
                    "id": response_id,
                    "object": "chat.completion.chunk",
                    "created": created_timestamp,
                    "model": "sai-chatbot-l6",
                    "choices": [{"index": 0, "delta": {"content": chunk}, "finish_reason": None}],
                }
                yield f"data: {json.dumps(response_chunk)}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(stream_generator(), media_type="text/event-stream")
    else:
        # Run the blocking scrape in a worker thread to keep the event loop free.
        full_content = await run_in_threadpool(lambda: "".join(get_sai_response(last_user_message)))
        return {
            "id": response_id,
            "object": "chat.completion",
            "created": created_timestamp,
            "model": "sai-chatbot-l6",
            "choices": [{"index": 0, "message": {"role": "assistant", "content": full_content}, "finish_reason": "stop"}],
            # Character counts serve as a rough stand-in for real token counts.
            "usage": {
                "prompt_tokens": len(last_user_message),
                "completion_tokens": len(full_content),
                "total_tokens": len(last_user_message) + len(full_content),
            },
        }
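
# Example call once the server is running (any non-empty bearer token passes
# the auth check above; host/port match the uvicorn settings below):
#
#   curl http://localhost:7860/v1/chat/completions \
#     -H "Authorization: Bearer sk-anything" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "sai-chatbot-l6", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'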


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)