mingming58 commited on
Commit
f1a0f6f
·
verified ·
1 Parent(s): d341e54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -10,26 +10,27 @@ model = AutoModelForCausalLM.from_pretrained(
10
  trust_remote_code=True
11
  )
12
 
13
- # 创建聊天管道
14
  chat_pipeline = pipeline(
15
- "conversational",
16
  model=model,
17
- tokenizer=tokenizer
 
18
  )
19
 
20
  # 聊天函数
21
  def chat_with_model(message, history):
22
- # 把历史对话转换成模型需要的格式
23
- history_transformer_format = []
24
  for user_msg, bot_msg in history:
25
- history_transformer_format.append({"role": "user", "content": user_msg})
26
- history_transformer_format.append({"role": "assistant", "content": bot_msg})
27
- # 添加当前用户消息
28
- history_transformer_format.append({"role": "user", "content": message})
29
 
30
  # 获取模型回复
31
- response = chat_pipeline(history_transformer_format)
32
- return response[-1]["content"]
 
 
33
 
34
  # 启动Gradio聊天界面
35
  if __name__ == "__main__":
 
10
  trust_remote_code=True
11
  )
12
 
13
# Text-generation pipeline backing the chat handler (replaces the removed
# "conversational" pipeline task).  max_new_tokens caps reply length so
# responses do not run on indefinitely.
chat_pipeline = pipeline(
    "text-generation",
    tokenizer=tokenizer,
    model=model,
    max_new_tokens=100,
)
20
 
21
# Chat handler wired into the Gradio chat interface.
def chat_with_model(message, history):
    """Generate one assistant reply for ``message`` given the chat history.

    Args:
        message: The latest user message (str).
        history: Prior turns as a list of ``(user_msg, bot_msg)`` tuples,
            the classic Gradio tuple-format history.

    Returns:
        The assistant's reply as a plain string.
    """
    # Render the conversation as a plain-text transcript prompt,
    # one "用户:…\n助手:…\n" pair per completed turn, ending with an
    # open "助手:" cue for the model to complete.
    turns = [f"用户:{user_msg}\n助手:{bot_msg}\n" for user_msg, bot_msg in history]
    prompt = "".join(turns) + f"用户:{message}\n助手:"

    # NOTE(review): temperature has no effect unless do_sample=True is
    # also passed — confirm whether sampling was intended here.
    full_text = chat_pipeline(prompt, temperature=0.7)[0]["generated_text"]

    # The text-generation pipeline echoes the prompt by default
    # (return_full_text=True), so strip it off by length. Splitting on
    # "助手:" (the old approach) is kept only as a fallback because it
    # misbehaves when the reply itself contains that marker.
    if full_text.startswith(prompt):
        completion = full_text[len(prompt):]
    else:
        completion = full_text.split("助手:")[-1]

    # The model may keep hallucinating further turns; cut at the first
    # simulated "用户:" so only this single reply is returned.
    bot_response = completion.split("用户:")[0].strip()
    return bot_response
34
 
35
  # 启动Gradio聊天界面
36
  if __name__ == "__main__":