import gradio as gr
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import torch
import os

MODEL_ID = "Qwen/Qwen3-0.6B-Base"
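
# The trailing "/no_think" in the system prompt is Qwen3's soft switch for
# suppressing the model's "thinking" output; whether this Base checkpoint's
# chat template actually honors it is an assumption.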
SYSTEM_PROMPT = """You are a helpful assistant. /no_think
"""

print("Loading model and tokenizer...")
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype="auto",
        device_map="auto",
        trust_remote_code=True,
    )
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Model loading failed: {e}")
    raise gr.Error(f"Critical error: could not load model {MODEL_ID}. Error: {e}")
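

# Streaming chat handler for gr.ChatInterface: builds the message list,
# launches generation on a background thread, and yields the partial reply
# as it grows.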
def predict(message, history):
    messages = []
    messages.append({"role": "system", "content": SYSTEM_PROMPT})
    # Replay prior turns so the model sees the whole conversation, assuming
    # the default tuple-style history (a list of [user, assistant] pairs).
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
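
    # Serialize the conversation with the model's chat template and move the
    # resulting token ids onto the model's device.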
    model_inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
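
    # TextIteratorStreamer lets generate() push decoded text into an iterator:
    # skip_prompt drops the echoed input, skip_special_tokens hides control
    # tokens, and the timeout keeps a stalled generation from hanging the UI.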
    streamer = TextIteratorStreamer(tokenizer, timeout=300.0, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        inputs=model_inputs,
        streamer=streamer,
        max_new_tokens=2048,
        do_sample=True,
        temperature=0.4,
        top_p=0.95,
    )
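
    # generate() blocks until completion, so run it on a worker thread while
    # this generator drains the streamer below.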
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
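
    # Yield the accumulated text on every chunk; gr.ChatInterface re-renders
    # the latest yielded string, which produces the streaming effect.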
    full_response = ""
    for new_text in streamer:
        full_response += new_text
        yield full_response
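

# Minimal chat UI wired to the streaming handler above.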
demo = gr.ChatInterface(
    fn=predict
)
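
# queue() enables request queuing for concurrent users; share=True exposes a
# temporary public Gradio link in addition to the local server.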
if __name__ == "__main__":
    demo.queue().launch(share=True)