sehsapneb commited on
Commit
5a43cd4
·
verified ·
1 Parent(s): 1b2c6f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -32
app.py CHANGED
@@ -1,8 +1,9 @@
1
- import gradio as gr # 我们实际没用它,但可以保留这个导入以防万一
2
  import time
3
  import json
4
  import uuid
5
- import uvicorn # 必须导入 uvicorn
 
6
  from fastapi import FastAPI, Request, HTTPException, Depends
7
  from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
8
  from starlette.responses import StreamingResponse
@@ -20,56 +21,69 @@ from selenium.webdriver.support import expected_conditions as EC
20
  app = FastAPI(
21
  title="SAI-ChatBot OpenAI-Compatible API",
22
  description="使用 Selenium 自动化在后台与 SAI-ChatBot 交互,并以 OpenAI API 格式返回结果。",
23
- version="1.0.0"
24
  )
25
  auth_scheme = HTTPBearer()
26
 
27
  def api_key_auth(credentials: HTTPAuthorizationCredentials = Depends(auth_scheme)):
28
- if not credentials:
29
- raise HTTPException(status_code=401, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"})
30
  return credentials.token
31
 
32
  # --- 2. OpenAI 格式的数据模型 ---
33
- class ChatMessage(BaseModel):
34
- role: str
35
- content: str
36
 
37
- class ChatCompletionRequest(BaseModel):
38
- model: str
39
- messages: List[ChatMessage]
40
- stream: Optional[bool] = False
41
-
42
- # --- 3. Selenium 自动化核心函数 ---
43
  def get_sai_response(prompt_text: str):
 
44
  options = webdriver.ChromeOptions()
45
  options.add_argument("--headless")
46
  options.add_argument("--no-sandbox")
47
  options.add_argument("--disable-dev-shm-usage")
48
  options.add_argument("--disable-gpu")
49
  options.binary_location = "/usr/bin/chromium"
50
-
51
  service = ChromeService(executable_path='/usr/bin/chromedriver')
52
  driver = None
53
  try:
 
54
  driver = webdriver.Chrome(service=service, options=options)
 
 
 
55
  driver.get("https://sai.coludai.cn/")
 
56
 
 
 
 
 
 
 
57
  wait = WebDriverWait(driver, 20)
58
  textarea_selector = 'textarea[placeholder="随时与未来对话,探索无限可能...."]'
 
59
  textarea = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, textarea_selector)))
 
60
 
 
61
  textarea.send_keys(prompt_text)
62
  textarea.send_keys(Keys.RETURN)
 
63
 
64
  last_assistant_selector = "(.//div[@class='message-item' and @type='assistant'])[last()]"
 
65
  wait.until(EC.presence_of_element_located((By.XPATH, last_assistant_selector)))
66
  last_response_element = driver.find_element(By.XPATH, last_assistant_selector)
 
67
 
68
  previous_text = ""
69
  max_wait_time = 120
70
  start_time = time.time()
71
 
 
72
  while time.time() - start_time < max_wait_time:
 
73
  try:
74
  markdown_body = last_response_element.find_element(By.CSS_SELECTOR, '.markdown-body')
75
  current_text = markdown_body.text
@@ -78,25 +92,43 @@ def get_sai_response(prompt_text: str):
78
  yield new_text_chunk
79
  previous_text = current_text
80
 
81
- time.sleep(1) # 稍作等待,让内容有机会刷新
82
  final_text_check = markdown_body.text
83
  if final_text_check == previous_text and final_text_check != "":
84
  break
85
- except Exception:
 
 
86
  time.sleep(0.5)
 
 
 
87
  except Exception as e:
88
- error_message = f"自动化过程中发生错误: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
89
  yield error_message
90
  finally:
91
  if driver:
 
92
  driver.quit()
 
93
 
94
- # --- 4. API 端点定义 ---
95
  @app.post("/v1/chat/completions")
96
  async def chat_completions(request: ChatCompletionRequest, token: str = Depends(api_key_auth)):
97
  last_user_message = next((msg.content for msg in reversed(request.messages) if msg.role == 'user'), None)
98
- if not last_user_message:
99
- raise HTTPException(status_code=400, detail="No user message found")
100
 
101
  response_id, created_timestamp = f"chatcmpl-{uuid.uuid4()}", int(time.time())
102
 
@@ -104,22 +136,14 @@ async def chat_completions(request: ChatCompletionRequest, token: str = Depends(
104
  async def stream_generator():
105
  for chunk in get_sai_response(last_user_message):
106
  if not chunk: continue
107
- response_chunk = {
108
- "id": response_id, "object": "chat.completion.chunk", "created": created_timestamp,
109
- "model": "sai-chatbot-l6", "choices": [{"index": 0, "delta": {"content": chunk}, "finish_reason": None}]
110
- }
111
  yield f"data: {json.dumps(response_chunk)}\n\n"
112
  yield f"data: [DONE]\n\n"
113
  return StreamingResponse(stream_generator(), media_type="text/event-stream")
114
  else:
115
  full_content = "".join([chunk for chunk in get_sai_response(last_user_message)])
116
- return {
117
- "id": response_id, "object": "chat.completion", "created": created_timestamp,
118
- "model": "sai-chatbot-l6", "choices": [{"index": 0, "message": {"role": "assistant", "content": full_content}, "finish_reason": "stop"}],
119
- "usage": {"prompt_tokens": len(last_user_message), "completion_tokens": len(full_content), "total_tokens": len(last_user_message) + len(full_content)}
120
- }
121
 
122
- # --- 5. 【关键】启动服务器 ---
123
  if __name__ == "__main__":
124
- # 在 Hugging Face Spaces 中,应用需要监听 0.0.0.0:7860
125
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
+ import gradio as gr
2
  import time
3
  import json
4
  import uuid
5
+ import uvicorn
6
+ import traceback # 导入用于打印详细错误信息的库
7
  from fastapi import FastAPI, Request, HTTPException, Depends
8
  from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
9
  from starlette.responses import StreamingResponse
 
21
# --- 1. FastAPI application setup ---
# OpenAI-compatible API surface; the actual chatting is driven by Selenium
# in get_sai_response below.
app = FastAPI(
    title="SAI-ChatBot OpenAI-Compatible API",
    description="使用 Selenium 自动化在后台与 SAI-ChatBot 交互,并以 OpenAI API 格式返回结果。",
    version="1.1.0-debug"
)
# Bearer-token security scheme consumed by the api_key_auth dependency.
auth_scheme = HTTPBearer()
27
 
28
def api_key_auth(credentials: HTTPAuthorizationCredentials = Depends(auth_scheme)) -> str:
    """Validate the Authorization header and return the bearer token string.

    Used as a FastAPI dependency on protected endpoints.

    Raises:
        HTTPException: 401 when no credentials were supplied. The
            WWW-Authenticate header is included as RFC 7235 requires
            for 401 responses.
    """
    if not credentials:
        raise HTTPException(
            status_code=401,
            detail="Not authenticated",
            headers={"WWW-Authenticate": "Bearer"},
        )
    # HTTPAuthorizationCredentials exposes the token as `.credentials`
    # (fields are `scheme` and `credentials`); there is no `.token`
    # attribute — the original line raised AttributeError on every request.
    return credentials.credentials
31
 
32
  # --- 2. OpenAI 格式的数据模型 ---
33
class ChatMessage(BaseModel):
    """A single chat message in OpenAI request format."""
    # Sender role, e.g. "user", "assistant" or "system".
    role: str
    # Plain-text message content.
    content: str


class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible)."""
    # Model name sent by the client (accepted but not used for routing).
    model: str
    # Full conversation history; only the last 'user' message is forwarded.
    messages: List[ChatMessage]
    # When true, the response is streamed as server-sent events.
    stream: Optional[bool] = False
 
35
 
36
+ # --- 3. Selenium 自动化核心函数 (带黑匣子) ---
 
 
 
 
 
37
def get_sai_response(prompt_text: str):
    """Drive the SAI-ChatBot web UI with headless Chromium and yield the reply.

    Generator: opens https://sai.coludai.cn/ in a fresh headless browser,
    types *prompt_text* into the chat textarea, then polls the newest
    assistant message, yielding each incremental text chunk as it appears.
    On any fatal failure it yields a single error message instead. The
    browser is always closed in the `finally` block.
    """
    print("--- [DEBUG] 进入 get_sai_response 函数 ---")
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    options.add_argument("--disable-gpu")
    options.binary_location = "/usr/bin/chromium"

    service = ChromeService(executable_path='/usr/bin/chromedriver')
    driver = None
    try:
        print("--- [DEBUG] 正在初始化 Chrome Driver... ---")
        driver = webdriver.Chrome(service=service, options=options)
        print("--- [DEBUG] Chrome Driver 初始化成功。---")

        print(f"--- [DEBUG] 正在访问: https://sai.coludai.cn/ ---")
        driver.get("https://sai.coludai.cn/")
        print("--- [DEBUG] 页面 get() 方法执行完毕。---")

        # Best-effort debug capture; guarded so a failing screenshot or
        # file write can never mask a later, more important failure.
        try:
            driver.save_screenshot("debug_page_loaded.png")
            with open("debug_page_source.html", "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            print("--- [DEBUG] 已保存加载后的页面截图和源码。---")
        except Exception as capture_e:
            print(f"--- [DEBUG] 保存调试信息失败: {capture_e} ---")

        wait = WebDriverWait(driver, 20)
        textarea_selector = 'textarea[placeholder="随时与未来对话,探索无限可能...."]'
        print(f"--- [DEBUG] 正在等待输入框 (selector: {textarea_selector})... ---")
        textarea = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, textarea_selector)))
        print("--- [DEBUG] 输入框定位成功。---")

        # NOTE(review): a prompt containing newlines will trigger RETURN
        # early and send a partial message — confirm against the UI.
        print("--- [DEBUG] 正在输入并发送 prompt... ---")
        textarea.send_keys(prompt_text)
        textarea.send_keys(Keys.RETURN)
        print("--- [DEBUG] Prompt 已发送。---")

        last_assistant_selector = "(.//div[@class='message-item' and @type='assistant'])[last()]"
        print(f"--- [DEBUG] 正在等待新的 AI 回复框 (selector: {last_assistant_selector})... ---")
        wait.until(EC.presence_of_element_located((By.XPATH, last_assistant_selector)))
        last_response_element = driver.find_element(By.XPATH, last_assistant_selector)
        print("--- [DEBUG] 新的 AI 回复框已出现。---")

        previous_text = ""
        max_wait_time = 120  # hard cap on how long we poll for a reply
        start_time = time.time()

        print("--- [DEBUG] 进入循环,开始捕获流式文本... ---")
        while time.time() - start_time < max_wait_time:
            try:
                markdown_body = last_response_element.find_element(By.CSS_SELECTOR, '.markdown-body')
                current_text = markdown_body.text
                # NOTE(review): this delta computation sits in a diff gap in
                # the original file — reconstructed from the surrounding
                # `yield new_text_chunk` / `previous_text = current_text`
                # lines; confirm against the full source.
                if current_text != previous_text:
                    new_text_chunk = current_text[len(previous_text):]
                    yield new_text_chunk
                    previous_text = current_text

                time.sleep(1)
                # If the text stopped growing and is non-empty, the reply
                # is considered complete.
                final_text_check = markdown_body.text
                if final_text_check == previous_text and final_text_check != "":
                    break
            except Exception as loop_e:
                # Transient errors (element not rendered yet) are logged
                # and retried; the outer timeout bounds the loop.
                print(f"--- [DEBUG] 循环中出现小错误: {loop_e} ---")
                time.sleep(0.5)

        print("--- [DEBUG] 文本捕获循环结束。---")

    except Exception as e:
        print("\n" + "="*20 + " !!! 发生严重错误 !!! " + "="*20)
        # Full stack trace goes to the Space logs for post-mortem.
        print(traceback.format_exc())
        print("="*60 + "\n")

        # On crash, save one more screenshot — guarded so a failing capture
        # cannot replace the original exception as the reported error.
        if driver:
            try:
                error_screenshot_path = "debug_error_screenshot.png"
                driver.save_screenshot(error_screenshot_path)
                print(f"--- [CRASH] 错误截图已保存到: {error_screenshot_path} ---")
            except Exception as capture_e:
                print(f"--- [CRASH] 保存错误截图失败: {capture_e} ---")

        # Surface the failure to the API caller as the response body.
        error_message = f"自动化过程中发生严重错误: {e}\n\n详细信息请查看 Hugging Face Space 的日志。"
        yield error_message
    finally:
        if driver:
            print("--- [DEBUG] 正在关闭 Chrome Driver... ---")
            driver.quit()
            print("--- [DEBUG] Chrome Driver 已关闭。---")
126
 
127
+ # --- 4. API 端点定义 (保持不变) ---
128
  @app.post("/v1/chat/completions")
129
  async def chat_completions(request: ChatCompletionRequest, token: str = Depends(api_key_auth)):
130
  last_user_message = next((msg.content for msg in reversed(request.messages) if msg.role == 'user'), None)
131
+ if not last_user_message: raise HTTPException(status_code=400, detail="No user message found")
 
132
 
133
  response_id, created_timestamp = f"chatcmpl-{uuid.uuid4()}", int(time.time())
134
 
 
136
  async def stream_generator():
137
  for chunk in get_sai_response(last_user_message):
138
  if not chunk: continue
139
+ response_chunk = {"id": response_id, "object": "chat.completion.chunk", "created": created_timestamp, "model": "sai-chatbot-l6", "choices": [{"index": 0, "delta": {"content": chunk}, "finish_reason": None}]}
 
 
 
140
  yield f"data: {json.dumps(response_chunk)}\n\n"
141
  yield f"data: [DONE]\n\n"
142
  return StreamingResponse(stream_generator(), media_type="text/event-stream")
143
  else:
144
  full_content = "".join([chunk for chunk in get_sai_response(last_user_message)])
145
+ return {"id": response_id, "object": "chat.completion", "created": created_timestamp, "model": "sai-chatbot-l6", "choices": [{"index": 0, "message": {"role": "assistant", "content": full_content}, "finish_reason": "stop"}], "usage": {"prompt_tokens": len(last_user_message), "completion_tokens": len(full_content), "total_tokens": len(last_user_message) + len(full_content)}}
 
 
 
 
146
 
147
+ # --- 5. 启动服务器 (保持不变) ---
148
# --- 5. Server entry point ---
if __name__ == "__main__":
    # Hugging Face Spaces expects the app to listen on 0.0.0.0:7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)