mingming58 committed on
Commit
47c378b
·
verified ·
1 Parent(s): 1716d5b

替换后

Browse files
Files changed (1) hide show
  1. app.py +39 -30
app.py CHANGED
@@ -1,55 +1,64 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import torch
4
 
5
- # 加载模型和分词器(选择 Qwen-1.8B-Chat 你的配置)
6
- model_name = "Qwen/Qwen-1.8B-Chat"
7
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
8
  model = AutoModelForCausalLM.from_pretrained(
9
  model_name,
10
- trust_remote_code=True,
11
- torch_dtype=torch.float16, # 用半精度减少内存占用
12
  device_map="auto",
13
- load_in_4bit=True, # 4-bit 量化,进一步降低内存压力
14
  bnb_4bit_compute_dtype=torch.float16
15
  )
16
 
17
- # 创建优化后的文本生成管道
18
- chat_pipeline = pipeline(
19
- "text-generation",
20
- model=model,
21
- tokenizer=tokenizer,
22
- max_new_tokens=150, # 平衡回复长度和速度
23
- temperature=0.7,
24
- do_sample=True,
25
- num_return_sequences=1,
26
- repetition_penalty=1.1 # 减少重复回复
27
- )
28
-
29
- # 聊天函数(优化历史拼接逻辑,减少计算量)
30
  def chat_with_model(message, history):
31
- # 精简对话历史,只保留最近 3 轮,降低内存占用
32
  history = history[-3:]
33
- prompt = "用户:你好,我是你的助手。\n"
 
34
  for user_msg, bot_msg in history:
35
- prompt += f"用户:{user_msg}\n助手:{bot_msg}\n"
36
- prompt += f"用户:{message}\n助手:"
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- # 推理时限制 batch 大小,适配 2 核 CPU
39
  with torch.no_grad():
40
- response = chat_pipeline(prompt, batch_size=1)[0]["generated_text"]
41
- bot_response = response.split("助手:")[-1].strip()
 
 
 
 
 
 
 
 
 
 
42
  return bot_response
43
 
44
- # 启动Gradio界面(关闭多余组件,减少资源占用
45
  if __name__ == "__main__":
46
  gr.ChatInterface(
47
  fn=chat_with_model,
48
  title="轻量聊天助手",
49
- description="基于 Qwen-1.8B-Chat 适配 2 核 16G 配置"
50
  ).launch(
51
  server_name="0.0.0.0",
52
  server_port=7860,
53
- share=False, # 关闭分享功能,减少资源消耗
54
  inline=False
55
  )
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

# Load tokenizer and model for Qwen1.5-1.8B-Chat with the memory optimizations
# kept from the previous setup (fp16 + 4-bit quantization).
model_name = "Qwen/Qwen1.5-1.8B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision to reduce memory use
    device_map="auto",
    # Passing load_in_4bit / bnb_4bit_compute_dtype as bare kwargs is the
    # deprecated path; the supported way is an explicit BitsAndBytesConfig.
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,  # 4-bit quantization to lower memory pressure
        bnb_4bit_compute_dtype=torch.float16,
    ),
)
15
 
16
+ # 优化后的聊天函数(适配 Qwen 的对话模板)
 
 
 
 
 
 
 
 
 
 
 
 
17
  def chat_with_model(message, history):
18
+ # 只保留最近 3 轮历史减少计算量
19
  history = history[-3:]
20
+ messages = []
21
+ # 拼接历史对话
22
  for user_msg, bot_msg in history:
23
+ messages.append({"role": "user", "content": user_msg})
24
+ messages.append({"role": "assistant", "content": bot_msg})
25
+ # 加入当前用户消息
26
+ messages.append({"role": "user", "content": message})
27
+
28
+ # 生成模型输入
29
+ inputs = tokenizer.apply_chat_template(
30
+ messages,
31
+ add_generation_prompt=True,
32
+ tokenize=True,
33
+ return_dict=True,
34
+ return_tensors="pt",
35
+ ).to(model.device)
36
 
37
+ # 推理生成回复
38
  with torch.no_grad():
39
+ outputs = model.generate(
40
+ **inputs,
41
+ max_new_tokens=150,
42
+ temperature=0.7,
43
+ repetition_penalty=1.1,
44
+ do_sample=True
45
+ )
46
+ # 解码并提取回复
47
+ bot_response = tokenizer.decode(
48
+ outputs[0][inputs["input_ids"].shape[-1]:],
49
+ skip_special_tokens=True
50
+ ).strip()
51
  return bot_response
52
 
53
+ # 启动 Gradio 界面(保留资源优化配置
54
  if __name__ == "__main__":
55
  gr.ChatInterface(
56
  fn=chat_with_model,
57
  title="轻量聊天助手",
58
+ description="基于 Qwen1.5-1.8B-Chat 适配 2 核 16G 配置"
59
  ).launch(
60
  server_name="0.0.0.0",
61
  server_port=7860,
62
+ share=False,
63
  inline=False
64
  )