Update app.py

app.py CHANGED
@@ -27,6 +27,31 @@ def debug_print(message):
     if DEBUG_MODE:
         print(f"[DEBUG] {message}")
 
+# Network connectivity test function
+def test_hf_connection():
+    """Test the connection to the Hugging Face API."""
+    try:
+        import requests
+        test_url = "https://api-inference.huggingface.co/v1/models"
+        headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+        debug_print("Testing the Hugging Face API connection...")
+        response = requests.get(test_url, headers=headers, timeout=10)
+        debug_print(f"Connection test response status: {response.status_code}")
+
+        if response.status_code == 200:
+            debug_print("Hugging Face API connection succeeded")
+            return True
+        elif response.status_code == 401:
+            print("Error: invalid Hugging Face API key")
+            return False
+        else:
+            print(f"Warning: the connection test returned status code {response.status_code}")
+            return True  # Other status codes may still work
+    except Exception as e:
+        print(f"Warning: could not reach the Hugging Face API: {e}")
+        return False
+
 def extract_json_from_response(response_text):
     """
     Extract and parse JSON from the model response (fixes UTF-8 encoding issues)
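Note: a quick way to exercise the new helper in isolation is a minimal sketch like the following; it assumes HF_TOKEN is set in the environment and that app.py is importable as a module (importing it will also build the Gradio UI at module level):

from app import test_hf_connection

if test_hf_connection():
    print("Hugging Face API reachable")
else:
    print("Hugging Face API unreachable or key invalid")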
@@ -45,6 +70,19 @@ def extract_json_from_response(response_text):
     except Exception:
         pass
 
+    # Clean up the response text, removing possible prefixes and suffixes
+    response_text = response_text.strip()
+
+    # Strip possible Markdown code-block markers
+    if response_text.startswith("```json"):
+        response_text = response_text[7:]
+    if response_text.startswith("```"):
+        response_text = response_text[3:]
+    if response_text.endswith("```"):
+        response_text = response_text[:-3]
+
+    response_text = response_text.strip()
+
     # Method 1: parse directly
     try:
         return json.loads(response_text)
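Note: the fixed-offset slicing above works because the second strip() removes the leftover newlines. An equivalent single-pass alternative using a regular expression is sketched below; strip_code_fences is a hypothetical helper, not part of this commit:

import re

def strip_code_fences(text):
    # Remove one surrounding Markdown fence pair (``` or ```json), if present
    text = text.strip()
    match = re.match(r"^```(?:json)?\s*(.*?)\s*```$", text, re.DOTALL)
    return match.group(1) if match else text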
@@ -83,7 +121,8 @@ def extract_json_from_response(response_text):
     except (json.JSONDecodeError, UnicodeDecodeError) as e:
         debug_print(f"JSON parsing after cleanup failed: {e}")
 
-
+    # If every method failed, raise a more detailed error
+    raise ValueError(f"Could not extract valid JSON from the response. Response length: {len(response_text)}, response prefix: {response_text[:100] if len(response_text) > 100 else response_text}")
 
 # =======================
 # Utility function: call the model
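Note: with this change extract_json_from_response raises on failure instead of falling through, so callers need a guard. A possible caller pattern, as a sketch (raw_reply is a placeholder name):

try:
    plan = extract_json_from_response(raw_reply)
except ValueError as e:
    print(f"Could not parse the model reply as JSON: {e}")
    plan = None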
@@ -105,7 +144,23 @@ def query_model(prompt, model_name="Qwen/Qwen3-4B-Thinking-2507:nscale"):
         api_key=HF_TOKEN  # use your token
     )
 
+    # Model-name validation and fallback models
+    available_models = [
+        "Qwen/Qwen3-4B-Thinking-2507:nscale",
+        "Qwen/Qwen2.5-7B-Instruct:nscale",
+        "meta-llama/Llama-3.1-8B-Instruct:nscale",
+        "mistralai/Mistral-Nemo-Instruct-2407:nscale"
+    ]
+
+    # If the requested model is not in the list, fall back to the default model
+    if model_name not in available_models:
+        print(f"Warning: model {model_name} is not in the recommended list; using the default model")
+        model_name = "Qwen/Qwen3-4B-Thinking-2507:nscale"
+
     try:
+        debug_print(f"Calling model: {model_name}")
+        debug_print(f"Prompt length: {len(prompt)} characters")
+
         response = client.chat.completions.create(
             model=model_name,
             messages=[
@@ -114,21 +169,70 @@ def query_model(prompt, model_name="Qwen/Qwen3-4B-Thinking-2507:nscale"):
             max_tokens=1000,
             temperature=0.7
         )
+
+        debug_print(f"API response status: success")
+
         if response.choices:
             content = response.choices[0].message.content.strip()
+            debug_print(f"Response content length: {len(content) if content else 0} characters")
+
+            # Check for an empty response
+            if not content:
+                print("Warning: the model returned empty content")
+                return None
+
             try:
                 content = content.encode('utf-8').decode('utf-8')
             except Exception:
                 pass
             return content
         else:
+            print("Warning: the API response has no choices field")
             return None
     except Exception as e:
-
+        error_msg = f"Error calling model: {e}"
+        print(error_msg)
+        debug_print(f"Error details: {type(e).__name__}: {str(e)}")
+
+        # More detailed error handling
         if "401" in str(e):
             print("Error: invalid API key. Check the HF_TOKEN environment variable.")
         elif "404" in str(e):
-            print(f"Error: model {model_name} not found.")
+            print(f"Error: model {model_name} not found. Trying fallback models...")
+            # Try the fallback models
+            for backup_model in available_models:
+                if backup_model != model_name:
+                    print(f"Trying fallback model: {backup_model}")
+                    try:
+                        response = client.chat.completions.create(
+                            model=backup_model,
+                            messages=[
+                                {"role": "user", "content": prompt}
+                            ],
+                            max_tokens=1000,
+                            temperature=0.7
+                        )
+                        if response.choices:
+                            content = response.choices[0].message.content.strip()
+                            # Check for an empty response
+                            if not content:
+                                print(f"Fallback model {backup_model} returned empty content")
+                                continue
+
+                            try:
+                                content = content.encode('utf-8').decode('utf-8')
+                            except Exception:
+                                pass
+                            print(f"Fallback model {backup_model} call succeeded")
+                            return content
+                    except Exception as backup_e:
+                        print(f"Fallback model {backup_model} call failed: {backup_e}")
+                        continue
+            print("All models failed")
+        elif "timeout" in str(e).lower() or "time out" in str(e).lower():
+            print("Error: the API call timed out; check your network connection.")
+        elif "connection" in str(e).lower():
+            print("Error: network connection problem; check your network settings.")
         return None
 
 # =======================
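Note: the fallback loop duplicates the primary completion call. One way the two paths could be factored into a single helper, as a sketch against the same OpenAI-compatible client (try_models is hypothetical, not part of this commit):

def try_models(client, prompt, models, max_tokens=1000, temperature=0.7):
    """Return the first non-empty completion among `models`, else None."""
    for name in models:
        try:
            response = client.chat.completions.create(
                model=name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
                temperature=temperature,
            )
            if response.choices:
                content = response.choices[0].message.content.strip()
                if content:
                    return content
        except Exception as e:
            print(f"Model {name} failed: {e}")
    return None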
@@ -165,15 +269,27 @@ def plan_tasks(user_request, filename=None):
     planning_response = query_model(planning_prompt)
     debug_print(f"Model response: {planning_response[:200] if planning_response else 'empty response'}")
 
+    # Stronger empty-response handling
+    if not planning_response:
+        print("Task planning failed: the model returned an empty response")
+        print("Possible causes:")
+        print("1. Invalid Hugging Face API key")
+        print("2. Network connection problems")
+        print("3. The model is currently unavailable")
+        print("4. The request timed out")
+        raise Exception("Task planning failed: the model returned an empty response")
+
     try:
-        if not planning_response:
-            raise ValueError("The model returned an empty response")
         plan = extract_json_from_response(planning_response)
         debug_print("JSON parsed successfully")
         return plan
     except Exception as e:
         error_msg = f"Plan parsing failed: {e}\nModel response content: {planning_response[:500] if planning_response else 'empty response'}"
         print(error_msg)
+        # Extra debugging information
+        if planning_response:
+            print(f"Response length: {len(planning_response)} characters")
+            print(f"First 100 characters of the response: {planning_response[:100] if len(planning_response) > 100 else planning_response}")
         raise Exception("Task planning failed; please rephrase your request.")
 
 # =======================
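Note: the four causes printed above could be narrowed down automatically by reusing test_hf_connection() before giving up. A sketch of that variant:

    if not planning_response:
        if not test_hf_connection():
            raise Exception("Task planning failed: the Hugging Face API is unreachable")
        raise Exception("Task planning failed: the model returned an empty response")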
@@ -311,7 +427,21 @@ def ai_agent_master(uploaded_file, user_request):
         chat_history.append({"role": "assistant", "content": "Analyzing the request and breaking it into tasks..."})
         yield chat_history, None
 
-        task_plan = plan_tasks(user_request, uploaded_file.name if uploaded_file else None)
+        # Retry mechanism
+        max_retries = 3
+        retry_count = 0
+        task_plan = None
+
+        while retry_count < max_retries and task_plan is None:
+            try:
+                task_plan = plan_tasks(user_request, uploaded_file.name if uploaded_file else None)
+            except Exception as e:
+                retry_count += 1
+                if retry_count < max_retries:
+                    print(f"Task planning failed, retrying in {retry_count}s... (attempt {retry_count})")
+                    time.sleep(retry_count)  # Increasing delay
+                else:
+                    raise e  # Maximum retries reached; re-raise
 
         if not task_plan or "steps" not in task_plan:
             raise Exception("Task planning returned an invalid format; please try again.")
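Note: the loop above sleeps retry_count seconds, a linearly increasing delay, and the same pattern is repeated at three call sites below. If those are ever unified, an exponential-backoff helper is a common shape; a sketch, with with_retries as a hypothetical name:

import time

def with_retries(fn, max_retries=3, base_delay=1.0):
    """Call fn(), retrying on exception with an exponentially growing delay."""
    for attempt in range(1, max_retries + 1):
        try:
            return fn()
        except Exception:
            if attempt == max_retries:
                raise
            delay = base_delay * 2 ** (attempt - 1)  # 1s, 2s, 4s, ...
            print(f"Attempt {attempt} failed, retrying in {delay:.0f}s...")
            time.sleep(delay)

Used as, for example, task_plan = with_retries(lambda: plan_tasks(user_request, filename)).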
@@ -330,10 +460,28 @@
             chat_history.append({"role": "assistant", "content": f"Calling model {step_model} for step {step_num}: {step_desc}..."})
             yield chat_history, None
 
-            step_result, step_file = execute_step(step, uploaded_file)
+            # Retry mechanism for step execution
+            step_retry_count = 0
+            step_result = None
+            step_file = None
+
+            while step_retry_count < max_retries and step_result is None:
+                try:
+                    step_result, step_file = execute_step(step, uploaded_file)
+                except Exception as e:
+                    step_retry_count += 1
+                    if step_retry_count < max_retries:
+                        print(f"Step execution failed, retrying in {step_retry_count}s... (attempt {step_retry_count})")
+                        time.sleep(step_retry_count)
+                    else:
+                        raise e  # Maximum retries reached; re-raise
+
             if step_file:
                 final_generated_file = step_file
 
+            if not step_result:
+                raise Exception(f"Step {step_num} returned an empty result")
+
             display_result = step_result if len(step_result) < 500 else step_result[:500] + "... (result truncated)"
             chat_history.append({"role": "assistant", "content": f"Step {step_num} result: {display_result}"})
             yield chat_history, None
@@ -341,12 +489,30 @@
             chat_history.append({"role": "assistant", "content": f"Validating step {step_num}..."})
             yield chat_history, None
 
-            is_valid, validation_feedback = validate_step(step, step_result, user_request)
+            # Retry mechanism for validation
+            validation_retry_count = 0
+            is_valid = False
+            validation_feedback = ""
+
+            while validation_retry_count < max_retries and not is_valid:
+                try:
+                    is_valid, validation_feedback = validate_step(step, step_result, user_request)
+                except Exception as e:
+                    validation_retry_count += 1
+                    if validation_retry_count < max_retries:
+                        print(f"Step validation failed, retrying in {validation_retry_count}s... (attempt {validation_retry_count})")
+                        time.sleep(validation_retry_count)
+                    else:
+                        # A validation failure does not abort the whole run, but log a warning
+                        print(f"Step {step_num} validation failed: {e}")
+                        is_valid = True  # Continue instead of aborting
+                        validation_feedback = "Validation raised an exception, but execution continues with the next step"
 
             if not is_valid:
                 chat_history.append({"role": "assistant", "content": f"Step {step_num} failed validation: {validation_feedback}"})
                 yield chat_history, None
-
+                # No longer raise an exception to abort the whole run; log a warning and continue
+                print(f"Warning: step {step_num} failed validation, continuing with the next step")
 
             chat_history.append({"role": "assistant", "content": f"Step {step_num} passed validation! Feedback: {validation_feedback}"})
             yield chat_history, None
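Note: after the warning branch the code still falls through to the "passed validation" message, so a failed step reports both outcomes in the chat. Guarding the success message with an else, as in this sketch, keeps the two paths separate:

            if not is_valid:
                chat_history.append({"role": "assistant", "content": f"Step {step_num} failed validation: {validation_feedback}"})
                print(f"Warning: step {step_num} failed validation, continuing with the next step")
            else:
                chat_history.append({"role": "assistant", "content": f"Step {step_num} passed validation! Feedback: {validation_feedback}"})
            yield chat_history, None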
@@ -357,7 +523,9 @@
         yield chat_history, final_generated_file
 
     except Exception as e:
-
+        error_msg = f"An error occurred during execution: {str(e)}"
+        print(f"Detailed error information: {type(e).__name__}: {str(e)}")
+        chat_history.append({"role": "assistant", "content": error_msg})
         yield chat_history, None
 
 # =======================
@@ -389,4 +557,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
 # Launch
 if __name__ == "__main__":
+    print("Initializing the AI agent...")
+    if DEBUG_MODE:
+        print("Debug mode is enabled")
+
+    # Test the Hugging Face connection
+    if test_hf_connection():
+        print("Hugging Face API connection test passed")
+    else:
+        print("Warning: the Hugging Face API connection test failed, continuing startup anyway...")
+
+    print("Launching the Gradio interface...")
     demo.launch()