Update app.py
Browse files
app.py
CHANGED
|
@@ -1,209 +1,88 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
import
|
| 3 |
import json
|
| 4 |
import base64
|
| 5 |
from io import BytesIO
|
| 6 |
from PIL import Image
|
| 7 |
import os
|
| 8 |
|
| 9 |
-
|
| 10 |
-
DEFAULT_API_BASE = "https://www.gpt4novel.com/api/xiaoshuoai/ext/v1/chat/completions"
|
| 11 |
-
MODELS_CACHE = None
|
| 12 |
-
|
| 13 |
-
# ====================== 获取模型列表 ======================
|
| 14 |
-
async def fetch_available_models(api_key: str):
|
| 15 |
-
global MODELS_CACHE
|
| 16 |
-
if MODELS_CACHE is not None:
|
| 17 |
-
return MODELS_CACHE
|
| 18 |
-
|
| 19 |
-
url = DEFAULT_API_BASE.replace("/chat/completions", "/models")
|
| 20 |
-
headers = {"Authorization": f"Bearer {api_key}"}
|
| 21 |
-
|
| 22 |
-
async with aiohttp.ClientSession() as session:
|
| 23 |
-
async with session.get(url, headers=headers, timeout=15) as resp:
|
| 24 |
-
if resp.status != 200:
|
| 25 |
-
text = await resp.text()
|
| 26 |
-
raise gr.Error(f"获取模型列表失败: {resp.status} - {text}")
|
| 27 |
-
data = await resp.json()
|
| 28 |
-
models = [m["id"] for m in data.get("data", []) if "nalang" in m["id"].lower()]
|
| 29 |
-
MODELS_CACHE = models
|
| 30 |
-
return models
|
| 31 |
-
|
| 32 |
-
# ====================== 流式聊天核心函数 ======================
|
| 33 |
-
async def stream_chat(messages, api_key, model, temperature=0.7, max_tokens=800):
|
| 34 |
-
if not api_key.strip():
|
| 35 |
-
yield "请先在设置中输入API Key并连接"
|
| 36 |
-
return
|
| 37 |
-
|
| 38 |
-
headers = {
|
| 39 |
-
"Content-Type": "application/json",
|
| 40 |
-
"Authorization": f"Bearer {api_key}"
|
| 41 |
-
}
|
| 42 |
-
|
| 43 |
-
payload = {
|
| 44 |
-
"model": model,
|
| 45 |
-
"messages": messages,
|
| 46 |
-
"stream": True,
|
| 47 |
-
"temperature": temperature,
|
| 48 |
-
"max_tokens": max_tokens,
|
| 49 |
-
"top_p": 0.35,
|
| 50 |
-
"repetition_penalty": 1.05
|
| 51 |
-
}
|
| 52 |
-
|
| 53 |
-
full_response = ""
|
| 54 |
-
async with aiohttp.ClientSession() as session:
|
| 55 |
-
async with session.post(DEFAULT_API_BASE, headers=headers, json=payload) as resp:
|
| 56 |
-
if resp.status != 200:
|
| 57 |
-
text = await resp.text()
|
| 58 |
-
yield f"请求失败: {resp.status} - {text}"
|
| 59 |
-
return
|
| 60 |
-
|
| 61 |
-
async for line in resp.content:
|
| 62 |
-
line = line.decode('utf-8').strip()
|
| 63 |
-
if line.startswith("data: "):
|
| 64 |
-
data_str = line[6:]
|
| 65 |
-
if data_str == "[DONE]":
|
| 66 |
-
break
|
| 67 |
-
try:
|
| 68 |
-
data = json.loads(data_str)
|
| 69 |
-
delta = data["choices"][0]["delta"]
|
| 70 |
-
if "content" in delta:
|
| 71 |
-
content = delta["content"]
|
| 72 |
-
full_response += content
|
| 73 |
-
yield full_response
|
| 74 |
-
except:
|
| 75 |
-
pass
|
| 76 |
-
|
| 77 |
-
# ====================== 角色卡处理 ======================
|
| 78 |
-
def extract_role_card(file):
|
| 79 |
-
if file is None:
|
| 80 |
-
return "未上传角色卡", None, None
|
| 81 |
|
|
|
|
| 82 |
try:
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
{
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
return preview, messages, name
|
| 113 |
-
|
| 114 |
-
except Exception as e:
|
| 115 |
-
return f"解析角色卡失败:{str(e)}", None, None
|
| 116 |
-
|
| 117 |
-
# ====================== Gradio界面 ======================
|
| 118 |
-
with gr.Blocks(title="小说AI聊天室", theme=gr.themes.Soft()) as demo:
|
| 119 |
-
gr.Markdown("# 小说AI聊天室")
|
| 120 |
-
|
| 121 |
-
api_key = gr.State("")
|
| 122 |
-
current_model = gr.State("nalang-xl-0826-10k")
|
| 123 |
-
chat_history = gr.State([])
|
| 124 |
-
role_messages = gr.State([])
|
| 125 |
-
|
| 126 |
-
with gr.Row():
|
| 127 |
-
with gr.Column(scale=4):
|
| 128 |
-
chatbot = gr.Chatbot(height=650, show_label=False, bubble_full_width=False)
|
| 129 |
-
msg = gr.Textbox(placeholder="在这里输入消息...", container=False, lines=3)
|
| 130 |
-
with gr.Row():
|
| 131 |
-
submit_btn = gr.Button("发送", variant="primary")
|
| 132 |
-
clear_btn = gr.Button("清空对话")
|
| 133 |
-
|
| 134 |
-
with gr.Column(scale=1):
|
| 135 |
-
gr.Markdown("### 设置")
|
| 136 |
-
api_key_input = gr.Textbox(label="API Key", type="password", placeholder="sk-xxx...")
|
| 137 |
-
connect_btn = gr.Button("连接", variant="primary")
|
| 138 |
-
model_dropdown = gr.Dropdown(label="选择模型", choices=[], interactive=False)
|
| 139 |
-
status = gr.Markdown("")
|
| 140 |
-
|
| 141 |
-
gr.Markdown("### 角色卡(支持JSON / 带JSON的PNG)")
|
| 142 |
-
role_upload = gr.File(label="上传角色卡", file_types=[".json", ".png"])
|
| 143 |
-
role_preview = gr.Markdown()
|
| 144 |
-
use_role_btn = gr.Button("使用该角色聊天", variant="secondary")
|
| 145 |
-
|
| 146 |
-
# ------------------ 连接API ------------------
|
| 147 |
-
async def connect_api(key):
|
| 148 |
-
if not key.strip():
|
| 149 |
-
return "API Key不能为空", [], False
|
| 150 |
-
try:
|
| 151 |
-
models = await fetch_available_models(key)
|
| 152 |
-
return "连接成功!可选择模型", models, True
|
| 153 |
-
except Exception as e:
|
| 154 |
-
return f"连接失败:{str(e)}", [], False
|
| 155 |
-
|
| 156 |
-
connect_btn.click(
|
| 157 |
-
connect_api,
|
| 158 |
-
inputs=api_key_input,
|
| 159 |
-
outputs=[status, model_dropdown, model_dropdown.interactive]
|
| 160 |
-
).then(lambda key: key, inputs=api_key_input, outputs=api_key)
|
| 161 |
-
|
| 162 |
-
# ------------------ 选择模型 ------------------
|
| 163 |
-
model_dropdown.change(
|
| 164 |
-
lambda m: m,
|
| 165 |
-
inputs=model_dropdown,
|
| 166 |
-
outputs=current_model
|
| 167 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 168 |
|
| 169 |
-
|
| 170 |
-
def user_submit(message, history, role_msgs):
|
| 171 |
-
if not message.strip():
|
| 172 |
-
return "", history
|
| 173 |
-
history.append((message, None))
|
| 174 |
-
return "", history
|
| 175 |
-
|
| 176 |
-
async def bot_response(history, api_key_val, model_val, role_msgs):
|
| 177 |
-
messages = role_msgs + [{"role": "user" if i % 2 == 0 else "assistant", "content": msg} for i, (msg, _) in enumerate(history)]
|
| 178 |
-
messages = messages[:-1] # 去掉最后一个None
|
| 179 |
-
|
| 180 |
-
response = ""
|
| 181 |
-
async for chunk in stream_chat(messages, api_key_val, model_val):
|
| 182 |
-
response = chunk
|
| 183 |
-
history[-1] = (history[-1][0], response)
|
| 184 |
-
yield history
|
| 185 |
-
|
| 186 |
-
submit_btn.click(user_submit, [msg, chatbot, role_messages], [msg, chatbot])\
|
| 187 |
-
.then(bot_response, [chatbot, api_key, current_model, role_messages], chatbot)
|
| 188 |
-
|
| 189 |
-
msg.submit(user_submit, [msg, chatbot, role_messages], [msg, chatbot])\
|
| 190 |
-
.then(bot_response, [chatbot, api_key, current_model, role_messages], chatbot)
|
| 191 |
-
|
| 192 |
-
# ------------------ 加载角色卡 ------------------
|
| 193 |
-
role_upload.upload(extract_role_card, inputs=role_upload, outputs=[role_preview, role_messages, gr.State()])
|
| 194 |
|
| 195 |
-
def
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
# 清空当前对话,加载角色首句
|
| 199 |
-
new_history = []
|
| 200 |
-
if len(role_msgs) > 1 and role_msgs[1]["role"] == "assistant":
|
| 201 |
-
new_history.append((None, role_msgs[1]["content"]))
|
| 202 |
-
return new_history
|
| 203 |
|
| 204 |
-
|
| 205 |
|
| 206 |
-
|
| 207 |
-
|
| 208 |
|
| 209 |
-
demo.launch(server_name="0.0.0.0", server_port=7860)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
import requests
|
| 3 |
import json
|
| 4 |
import base64
|
| 5 |
from io import BytesIO
|
| 6 |
from PIL import Image
|
| 7 |
import os
|
| 8 |
|
| 9 |
+
API_BASE = "https://www.gpt4novel.com/api/xiaoshuoai/ext/v1"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
def get_models(api_key):
    """Return the model ids available to *api_key*, reverse-sorted (newest first).

    Falls back to a known-good default model when the endpoint is
    unreachable, rejects the key, or returns an unexpected payload.
    """
    try:
        r = requests.get(
            f"{API_BASE}/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=15,  # don't hang the UI on a dead endpoint
        )
        r.raise_for_status()  # don't parse error bodies as a model list
        models = [m["id"] for m in r.json()["data"]]
        return sorted(models, reverse=True)
    except Exception:
        # Network/auth/schema failure: degrade to the default model.
        return ["nalang-xl-0826-10k"]
|
| 18 |
+
|
| 19 |
+
def extract_card_from_png(file_obj):
    """Extract an embedded character card from a PNG's 'chara' metadata entry.

    Returns the parsed card dict, or None when the file is not a PNG or
    carries no (non-empty) 'chara' payload.
    """
    path = file_obj.name
    if not path.endswith(".png"):
        return None
    # Character-card PNGs store a base64-encoded JSON blob under 'chara'.
    chara = Image.open(path).info.get("chara")
    if not chara:
        return None
    return json.loads(base64.b64decode(chara).decode())
|
| 28 |
+
|
| 29 |
+
def chat(message, history, model, api_key, system_prompt):
    """Stream a chat completion and yield the updated Chatbot history.

    Parameters mirror the Gradio wiring: the user message, the current
    Chatbot history ([[user, assistant], ...] pairs), the model id, the
    bearer API key, and the system prompt (falls back to the default
    assistant persona when empty).

    Yields the full history after each streamed token so the gr.Chatbot
    output component renders the reply incrementally.  (Yielding bare
    token strings, as before, replaced the whole chat window with raw
    text and lost prior turns.)
    """
    messages = [{"role": "system", "content": system_prompt or "你是一个有帮助的AI助手。"}]
    for user_turn, bot_turn in history:
        # Card-loading seeds history with [None, greeting]; never send
        # a null-content user message to the API.
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})

    # Open a fresh history entry that the streamed reply fills in.
    history = list(history) + [[message, ""]]
    yield history

    resp = requests.post(
        f"{API_BASE}/chat/completions",
        json={"model": model, "messages": messages, "stream": True,
              "temperature": 0.7, "max_tokens": 800},
        headers={"Authorization": f"Bearer {api_key}"},
        stream=True,
        timeout=60,
    )
    resp.raise_for_status()
    for line in resp.iter_lines():
        # SSE frames look like b"data: {...}"; skip keep-alive blanks.
        if not line or not line.startswith(b"data: "):
            continue
        try:
            data = json.loads(line[6:].decode())
            token = data["choices"][0]["delta"].get("content")
        except (ValueError, KeyError, IndexError):
            # Covers the "[DONE]" sentinel and malformed chunks.
            continue
        if token:
            history[-1][1] += token
            yield history
|
| 52 |
+
|
| 53 |
+
with gr.Blocks() as demo:
    # Optional custom page header; skipped (instead of crashing) when the
    # asset is not shipped alongside app.py.
    if os.path.exists("index.html"):
        with open("index.html", encoding="utf-8") as f:
            gr.HTML(f.read())
    chatbot = gr.Chatbot(height="70vh")
    with gr.Row():
        msg = gr.Textbox(scale=8, placeholder="发送消息...", container=False)
        send = gr.Button("发送", scale=1)
    model = gr.Dropdown(choices=[], label="模型")
    api_key = gr.Textbox(label="API Key", type="password")
    card_file = gr.File(label="导入角色卡 (.json 或 .png)", file_types=[".json", ".png"])

    system_prompt = gr.Textbox(label="系统提示词(角色卡自动填充)", lines=4, value="你是一个有帮助的AI助手。")

    def load_card(file):
        """Parse an uploaded character card into a system prompt.

        Returns (system_prompt_text, seeded_chatbot_history); empty values
        when nothing was uploaded or the card could not be parsed.
        """
        if not file:
            return "", []
        # Gradio hands us a file wrapper: .name is the path on disk
        # (str(file) is the object's repr, not the filename).
        path = file.name
        if path.endswith(".png"):
            card = extract_card_from_png(file)
        else:
            with open(path, encoding="utf-8") as f:
                card = json.load(f)
        if not card:
            return "", []
        # Tavern v2 cards nest fields under a "data" dict; v1 cards are
        # flat.  The old lookup returned the whole "data" dict as the
        # prompt whenever it existed.
        data = card.get("data") if isinstance(card.get("data"), dict) else card
        prompt = data.get("system_prompt") or data.get("description") or ""
        name = data.get("name") or card.get("name") or "角色"
        return prompt, [[None, f"已加载角色卡:{name}"]]

    card_file.change(load_card, card_file, [system_prompt, chatbot])

    def connect_key(key):
        """Fetch the model list for this key and repopulate the dropdown."""
        models = get_models(key)
        return gr.Dropdown(choices=models, value=models[0] if models else None)

    api_key.submit(connect_key, api_key, model)

    # Streamed bot reply, then clear the input box.
    send.click(chat, [msg, chatbot, model, api_key, system_prompt], chatbot).then(lambda: "", None, msg)
    msg.submit(chat, [msg, chatbot, model, api_key, system_prompt], chatbot).then(lambda: "", None, msg)

demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=7860)
|