tosei0000 committed
Commit 9b5ea7d · verified · 1 Parent(s): 15e185b

Update app.py

Files changed (1):
  1. app.py (+78 -11)
app.py CHANGED
@@ -1,18 +1,85 @@
  from transformers import AutoTokenizer, AutoModelForCausalLM
  import torch

- model_path = "tosei0000/chatbot"

- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)

- device = "cuda" if torch.cuda.is_available() else "cpu"
- model.to(device)

- def chat(prompt, max_new_tokens=100):
-     inputs = tokenizer(prompt, return_tensors="pt").to(device)
-     outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)

- response = chat("こんにちは!")
- print(response)
+ import gradio as gr

+ # Model name (can be swapped for your own Qwen2 model)
+ model_name = "tosei0000/chatbot"

+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)

+ # Set pad_token_id (avoids warnings and generation errors)
+ tokenizer.pad_token_id = tokenizer.eos_token_id
+ model.config.pad_token_id = tokenizer.eos_token_id

+ # Chat history store (unused below; the gr.State holds the per-session history)
+ chat_history = []

+ # Multi-turn chat generation function
+ def chat(user_input, history):
+     # Build the prompt by concatenating the history
+     prompt = ""
+     for user_msg, bot_msg in history:
+         prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
+     prompt += f"User: {user_input}\nAssistant:"
+
+     # Encode the input
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     # Generate
+     output = model.generate(
+         **inputs,
+         max_new_tokens=256,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+         pad_token_id=tokenizer.pad_token_id,
+         eos_token_id=tokenizer.eos_token_id
+     )
+
+     # Decode
+     decoded = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     # Extract only the model's latest reply (drop the leading prompt)
+     response = decoded[len(prompt):].strip().split("\n")[0]
+
+     # Update the history
+     history.append((user_input, response))
+     return history, history
+
+ # Build the Gradio interface
+ with gr.Blocks(title="Qwen2 Chatbot") as demo:
+     gr.Markdown("## 🤖 Qwen2 Chatbot")
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(label="Enter your question")
+     clear = gr.Button("Clear chat")
+
+     state = gr.State([])  # holds the history
+
+     msg.submit(chat, [msg, state], [chatbot, state])
+     clear.click(lambda: ([], []), None, [chatbot, state])
+
+ # Launch Gradio
+ if __name__ == "__main__":
+     demo.launch()
+
+ # from transformers import AutoTokenizer, AutoModelForCausalLM
+ # import torch
+
+ # model_path = "tosei0000/chatbot"
+
+ # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+ # model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+ # model.to(device)
+
+ # def chat(prompt, max_new_tokens=100):
+ #     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+ #     outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
+ #     return tokenizer.decode(outputs[0], skip_special_tokens=True)

+ # response = chat("こんにちは!")
+ # print(response)
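
A side note on the new chat() (not part of the commit): slicing the decoded string with decoded[len(prompt):] assumes decoding round-trips the prompt exactly, and the hand-rolled "User:/Assistant:" prompt bypasses the chat template that Qwen2 checkpoints generally ship with. Below is a minimal sketch of a more robust variant, reusing the tokenizer and model loaded above and assuming the checkpoint provides a chat template; the helper name chat_robust is ours, not the author's.

def chat_robust(user_input, history):
    # Rebuild the conversation in the role/content format the template expects.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": user_input})

    # Let the tokenizer apply the model's own chat template.
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    output = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens instead of slicing the decoded
    # string by len(prompt), which can misalign after retokenization.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    history.append((user_input, response))
    return history, history

Because the template inserts the model's own end-of-turn markers, generation usually stops cleanly on its own, so the split("\n")[0] truncation used in the committed version is no longer needed.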