willn9 committed on
Commit
5c21691
·
verified ·
1 Parent(s): 5590758

updated code for Gradio 6.9.0

Browse files
Files changed (1) hide show
  1. app.py +21 -14
app.py CHANGED
@@ -22,7 +22,7 @@ def make_system_message(system_message, demographics, occupation, psychographics
22
 
23
  def stream_chat(
24
  message,
25
- history, # list[list[str, str]] from gr.Chatbot
26
  system_message,
27
  demographics,
28
  occupation,
@@ -34,43 +34,49 @@ def stream_chat(
34
  top_p,
35
  ):
36
  """
37
- Streaming generator that yields the progressively updated chat history.
38
  """
39
  # 1) Build system + conversation messages
40
  sys_msg = make_system_message(system_message, demographics, occupation, psychographics, buying_habits, critical_mode)
41
 
42
  messages = [{"role": "system", "content": sys_msg}]
43
- for user_msg, assistant_msg in (history or []):
44
- if user_msg:
45
- messages.append({"role": "user", "content": user_msg})
46
- if assistant_msg:
47
- messages.append({"role": "assistant", "content": assistant_msg})
 
48
  messages.append({"role": "user", "content": message})
49
 
50
- # 2) Start streaming back to the Chatbot
51
- running_reply = ""
 
 
 
52
  # Optimistically show the assistant "typing"
53
- running_history = (history or []) + [[message, ""]]
54
  yield running_history
55
 
56
  try:
57
  response = client.chat.completions.create(
58
- model="gpt-4o-mini", # adjust if needed
59
  messages=messages,
60
  max_tokens=int(max_tokens),
61
  temperature=float(temp),
62
  top_p=float(top_p),
63
  stream=True,
64
  )
 
 
65
  for chunk in response:
66
  if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
67
  token = chunk.choices[0].delta.content
68
  running_reply += token
69
- running_history[-1][1] = running_reply
 
70
  # Yield the whole history each time so the UI updates
71
  yield running_history
72
  except Exception as e:
73
- running_history[-1][1] = f"❌ An error occurred: {str(e)}"
74
  yield running_history
75
 
76
 
@@ -82,7 +88,8 @@ with gr.Blocks(title="Virtual Consumer Persona – Live Focus Group!") as demo:
82
  *Powered by OpenAI GPT-4o-mini. Developed by wn.*
83
  """)
84
 
85
- chatbot = gr.Chatbot(height=450)
 
86
 
87
  with gr.Column():
88
  instructions = gr.Textbox(
 
22
 
23
  def stream_chat(
24
  message,
25
+ history, # In Gradio 6, this is a list of dicts: [{"role": "user", "content": "..."}]
26
  system_message,
27
  demographics,
28
  occupation,
 
34
  top_p,
35
  ):
36
  """
37
+ Streaming generator that yields the progressively updated chat history using Gradio 6's message format.
38
  """
39
  # 1) Build system + conversation messages
40
  sys_msg = make_system_message(system_message, demographics, occupation, psychographics, buying_habits, critical_mode)
41
 
42
  messages = [{"role": "system", "content": sys_msg}]
43
+
44
+ # Append past history (Gradio 6 format is already compatible with OpenAI's format!)
45
+ for msg in (history or []):
46
+ messages.append({"role": msg["role"], "content": msg["content"]})
47
+
48
+ # Append current user message
49
  messages.append({"role": "user", "content": message})
50
 
51
+ # 2) Setup the history to send back to the Gradio UI
52
+ running_history = (history or []).copy()
53
+ running_history.append({"role": "user", "content": message})
54
+ running_history.append({"role": "assistant", "content": ""})
55
+
56
  # Optimistically show the assistant "typing"
 
57
  yield running_history
58
 
59
  try:
60
  response = client.chat.completions.create(
61
+ model="gpt-4o-mini",
62
  messages=messages,
63
  max_tokens=int(max_tokens),
64
  temperature=float(temp),
65
  top_p=float(top_p),
66
  stream=True,
67
  )
68
+
69
+ running_reply = ""
70
  for chunk in response:
71
  if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
72
  token = chunk.choices[0].delta.content
73
  running_reply += token
74
+ # Update the content of the last message in the history
75
+ running_history[-1]["content"] = running_reply
76
  # Yield the whole history each time so the UI updates
77
  yield running_history
78
  except Exception as e:
79
+ running_history[-1]["content"] = f"❌ An error occurred: {str(e)}"
80
  yield running_history
81
 
82
 
 
88
  *Powered by OpenAI GPT-4o-mini. Developed by wn.*
89
  """)
90
 
91
+ # Explicitly set type="messages" for Gradio 6 compatibility
92
+ chatbot = gr.Chatbot(height=450, type="messages")
93
 
94
  with gr.Column():
95
  instructions = gr.Textbox(