Commit 24ff1cd · Parent(s): b630386
Fix 410 error: add backup models and better error handling
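In short, the commit queries the primary Inference API endpoint and, when it returns 410 (or fails for any other reason), falls back to a list of backup models. A minimal sketch of that pattern, reusing the endpoint URLs from the diff below (the helper name and the stripped-down response handling are illustrative, not part of the commit):

import os
import requests

# Primary endpoint and token, read from the environment as in app.py
HF_API_URL = os.getenv("HF_API_URL", "https://api-inference.huggingface.co/models/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct")
HF_API_TOKEN = os.getenv("HF_API_TOKEN", "")

# Backup endpoints, same list as in the commit
BACKUP_MODELS = [
    "https://api-inference.huggingface.co/models/cyberagent/calm2-7b-chat",
    "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2",
]

def query_with_fallback(prompt):
    """Illustrative helper: return text from the first model that answers, else None."""
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
    for model_url in [HF_API_URL] + BACKUP_MODELS:
        try:
            resp = requests.post(
                model_url,
                headers=headers,
                json={"inputs": prompt, "options": {"wait_for_model": True}},
                timeout=60,
            )
        except requests.exceptions.RequestException:
            continue  # timeout or network error: try the next model
        if resp.status_code == 200:
            result = resp.json()
            if isinstance(result, list) and result:
                return result[0].get("generated_text", "")
        # 410 (Gone), 503 (still loading) or anything else: try the next model
    return None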
app.py CHANGED

@@ -7,7 +7,11 @@ import os
 import requests
 
 # Hugging Face Inference API settings
+# If the default model is unavailable, other models to try:
+# - elyza/ELYZA-japanese-Llama-2-7b-fast-instruct
+# - cyberagent/calm2-7b-chat
+# - meta-llama/Llama-2-7b-chat-hf (requires authentication)
+HF_API_URL = os.getenv("HF_API_URL", "https://api-inference.huggingface.co/models/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct")
 HF_API_TOKEN = os.getenv("HF_API_TOKEN", "")
 
 # Optimized system prompt

@@ -58,41 +62,79 @@ def generate_response(message, history):
     if HF_API_TOKEN:
         headers["Authorization"] = f"Bearer {HF_API_TOKEN}"
 
-            "do_sample": True,
-            "return_full_text": False,
-        },
-        "options": {
-            "wait_for_model": True
-        }
-    }
-    except Exception as e:
-        return f"エラーが発生しました: {str(e)}"
+    # Backup models (used if the primary model is unavailable)
+    backup_models = [
+        "https://api-inference.huggingface.co/models/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct",
+        "https://api-inference.huggingface.co/models/cyberagent/calm2-7b-chat",
+        "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2",
+    ]
+
+    # Try the primary model first, then the backups
+    models_to_try = [HF_API_URL] + backup_models
+
+    for model_url in models_to_try:
+        payload = {
+            "inputs": prompt,
+            "parameters": {
+                "max_new_tokens": 512,
+                "temperature": 0.7,
+                "top_p": 0.9,
+                "do_sample": True,
+                "return_full_text": False,
+            },
+            "options": {
+                "wait_for_model": True
+            }
+        }
+
+        try:
+            response = requests.post(model_url, headers=headers, json=payload, timeout=60)
+
+            # On success, return the result
+            if response.status_code == 200:
+                result = response.json()
+
+                if isinstance(result, list) and len(result) > 0:
+                    generated_text = result[0].get("generated_text", "")
+                    # Clean up the answer
+                    generated_text = generated_text.strip()
+                    # Remove any echoed prompt markers
+                    if "[/INST]" in generated_text:
+                        generated_text = generated_text.split("[/INST]")[-1].strip()
+                    if "<s>" in generated_text:
+                        generated_text = generated_text.split("<s>")[-1].strip()
+                    return generated_text
+                elif isinstance(result, dict) and "generated_text" in result:
+                    generated_text = result["generated_text"].strip()
+                    if "[/INST]" in generated_text:
+                        generated_text = generated_text.split("[/INST]")[-1].strip()
+                    return generated_text
+                else:
+                    continue  # Try the next model
+
+            # 503 means the model is still loading: ask the user to wait and retry
+            elif response.status_code == 503:
+                error_info = response.json() if response.content else {}
+                estimated_time = error_info.get("estimated_time", 30)
+                return f"モデルを読み込み中です。約{estimated_time}秒お待ちください。しばらくしてから再度お試しください。"
+
+            # 410 (Gone): the endpoint was removed, try the next model
+            elif response.status_code == 410:
+                continue
+
+            # Any other error: try the next model
+            else:
+                continue
+
+        except requests.exceptions.Timeout:
+            continue  # Timed out, try the next model
+        except requests.exceptions.RequestException:
+            continue  # Request error, try the next model
+        except Exception:
+            continue  # Other error, try the next model
+
+    # All models failed: return a friendly error message
+    return "申し訳ございませんが、現在APIに接続できません。しばらく時間をおいてから再度お試しください。または、Spaceの設定で別のモデルを指定してください。"
 
 # Create the Gradio interface
 def create_interface():
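Because the primary endpoint is read from the HF_API_URL environment variable, the Space can also be pointed at one of the listed alternatives without editing app.py. A hypothetical override (in a real deployment these would be set as Space variables/secrets rather than in code):

import os

# Use one of the backup models from the diff above as the primary endpoint
os.environ["HF_API_URL"] = "https://api-inference.huggingface.co/models/cyberagent/calm2-7b-chat"
os.environ["HF_API_TOKEN"] = "hf_xxx"  # placeholder value; store the real token as a Space secret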