ST-THOMAS-OF-AQUINAS committed on
Commit
8a9ad09
·
verified ·
1 Parent(s): 9dcfd91

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -12
app.py CHANGED
@@ -21,14 +21,25 @@ SYSTEM_MESSAGE = {
21
  }
22
 
23
  def chat(user_input, messages):
24
- # Initialize messages if empty
25
  if not messages:
26
  messages = [SYSTEM_MESSAGE]
27
 
28
- messages.append({"role": "user", "content": user_input})
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  prompt = tokenizer.apply_chat_template(
31
- messages,
32
  tokenize=False,
33
  add_generation_prompt=True
34
  )
@@ -48,9 +59,12 @@ def chat(user_input, messages):
48
  skip_special_tokens=True
49
  )
50
 
51
- messages.append({"role": "assistant", "content": response})
 
 
 
52
 
53
- return messages, ""
54
 
55
  with gr.Blocks() as demo:
56
  gr.Markdown("## 🤖 Qwen Chatbot")
@@ -62,13 +76,7 @@ with gr.Blocks() as demo:
62
  msg.submit(
63
  chat,
64
  inputs=[msg, state],
65
- outputs=[chatbot, msg],
66
- )
67
-
68
- chatbot.change(
69
- lambda x: x,
70
- chatbot,
71
- state
72
  )
73
 
74
  demo.launch()
 
21
  }
22
 
23
  def chat(user_input, messages):
24
+ # Initialize clean messages
25
  if not messages:
26
  messages = [SYSTEM_MESSAGE]
27
 
28
+ # Safety: ensure all contents are strings
29
+ clean_messages = []
30
+ for m in messages:
31
+ clean_messages.append({
32
+ "role": m["role"],
33
+ "content": str(m["content"])
34
+ })
35
+
36
+ clean_messages.append({
37
+ "role": "user",
38
+ "content": str(user_input)
39
+ })
40
 
41
  prompt = tokenizer.apply_chat_template(
42
+ clean_messages,
43
  tokenize=False,
44
  add_generation_prompt=True
45
  )
 
59
  skip_special_tokens=True
60
  )
61
 
62
+ clean_messages.append({
63
+ "role": "assistant",
64
+ "content": response
65
+ })
66
 
67
+ return clean_messages, ""
68
 
69
  with gr.Blocks() as demo:
70
  gr.Markdown("## 🤖 Qwen Chatbot")
 
76
  msg.submit(
77
  chat,
78
  inputs=[msg, state],
79
+ outputs=[chatbot, state]
 
 
 
 
 
 
80
  )
81
 
82
  demo.launch()