nothingworry committed
Commit aa77c66 · 1 Parent(s): cb54b4d

Fix Gradio Chatbot format compatibility: convert dict to tuple format for 4.20.0
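Gradio 4.20.0's gr.Chatbot expects chat history as a list of (user, assistant) tuples, while the handlers had been appending OpenAI-style role/content dicts. A minimal sketch of the conversion this commit introduces, assuming convert_history_to_tuples can be imported from app.py:

from app import convert_history_to_tuples

# Message-dict history, as previously built by chat_with_agent
dict_history = [
    {"role": "user", "content": "What is our churn rate?"},
    {"role": "assistant", "content": "Churn is 4.2% this quarter."},
    {"role": "user", "content": "And last quarter?"},
]

# Tuple pairs, as expected by gr.Chatbot in Gradio 4.20.0; a trailing user
# message with no reply yet becomes a pair with an empty assistant slot.
print(convert_history_to_tuples(dict_history))
# [('What is our churn rate?', 'Churn is 4.2% this quarter.'),
#  ('And last quarter?', '')]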

Files changed (1)
  1. app.py +89 -13
app.py CHANGED
@@ -39,6 +39,78 @@ def can_view_analytics(role: str) -> bool:
     return role in VALID_ROLES # All roles can view analytics


+def convert_history_to_tuples(history):
+    """
+    Convert history from dict format to tuple format for Gradio 4.20.0 compatibility.
+
+    Input format: [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]
+    Output format: [("user message", "assistant response"), ...]
+    """
+    if not history:
+        return []
+
+    # If already in tuple format, return as-is
+    if history and isinstance(history[0], (tuple, list)) and len(history[0]) == 2:
+        return history
+
+    # Convert dict format to tuple format
+    result = []
+    current_user = None
+    current_assistant = None
+
+    for item in history:
+        if isinstance(item, dict):
+            if item.get("role") == "user":
+                # If we have a pending assistant message, save the pair
+                if current_user is not None and current_assistant is not None:
+                    result.append((current_user, current_assistant))
+                current_user = item.get("content", "")
+                current_assistant = None
+            elif item.get("role") == "assistant":
+                current_assistant = item.get("content", "")
+        elif isinstance(item, (tuple, list)) and len(item) == 2:
+            # Already in tuple format
+            result.append(tuple(item))
+
+    # Add the last pair if exists
+    if current_user is not None:
+        result.append((current_user, current_assistant or ""))
+
+    return result
+
+
+def append_to_history(history, role, content):
+    """
+    Append a message to history in tuple format for Gradio 4.20.0.
+    """
+    history = convert_history_to_tuples(history)
+
+    if role == "user":
+        # For user messages, we need to add a new tuple with empty assistant response
+        history.append((content, ""))
+    elif role == "assistant":
+        # For assistant messages, update the last tuple's assistant part
+        if history and len(history[-1]) == 2:
+            user_msg = history[-1][0]
+            history[-1] = (user_msg, content)
+        else:
+            # If no user message exists, create one with empty user
+            history.append(("", content))
+
+    return history
+
+
+def update_last_assistant_message(history, content):
+    """
+    Update the last assistant message in history (tuple format).
+    """
+    history = convert_history_to_tuples(history)
+    if history and len(history[-1]) == 2:
+        user_msg = history[-1][0]
+        history[-1] = (user_msg, content)
+    return history
+
+
 def chat_with_agent(message, tenant_id, role, history):
     """
     Send a message to the backend MCP agent and return the response.
@@ -58,19 +130,22 @@ def chat_with_agent(message, tenant_id, role, history):
     Yields:
         Updated chat history with agent response (streaming character-by-character)
     """
+    # Convert history to tuple format for Gradio 4.20.0 compatibility
+    history = convert_history_to_tuples(history)
+
     if not message or not message.strip():
         yield history
         return

     if not tenant_id or not tenant_id.strip():
         error_msg = "Please enter a Tenant ID before sending a message."
-        history.append({"role": "user", "content": message})
-        history.append({"role": "assistant", "content": error_msg})
+        history = append_to_history(history, "user", message)
+        history = append_to_history(history, "assistant", error_msg)
         yield history
         return

     # Add user message to history
-    history.append({"role": "user", "content": message})
+    history = append_to_history(history, "user", message)

     # Backend streaming endpoint
     backend_url = f"{BACKEND_BASE_URL}/agent/message/stream"
@@ -104,7 +179,7 @@ def chat_with_agent(message, tenant_id, role, history):
         if response.status_code == 200:
             # Initialize assistant message
             assistant_message = ""
-            history.append({"role": "assistant", "content": assistant_message})
+            history = append_to_history(history, "assistant", assistant_message)
             yield history # Yield initial empty message

             # Stream tokens character-by-character for smooth UX
@@ -126,7 +201,7 @@ def chat_with_agent(message, tenant_id, role, history):
                             status_msg = data.get('message', '')
                             if status_msg:
                                 # Show status in the message temporarily
-                                history[-1] = {"role": "assistant", "content": f"⏳ {status_msg}"}
+                                history = update_last_assistant_message(history, f"⏳ {status_msg}")
                                 yield history
                             continue

@@ -135,7 +210,7 @@ def chat_with_agent(message, tenant_id, role, history):
                         if token:
                             assistant_message += token
                             # Update the last message in history
-                            history[-1] = {"role": "assistant", "content": assistant_message}
+                            history = update_last_assistant_message(history, assistant_message)
                             yield history # Yield updated history immediately for smooth character-by-character display

                         if data.get('done', False):
@@ -146,7 +221,7 @@ def chat_with_agent(message, tenant_id, role, history):
                     try:
                         error_data = json.loads(line[6:])
                         error_msg = error_data.get('error', 'Unknown error')
-                        history[-1] = {"role": "assistant", "content": f"❌ Error: {error_msg}"}
+                        history = update_last_assistant_message(history, f"❌ Error: {error_msg}")
                         yield history
                         break
                     except:
@@ -155,27 +230,27 @@ def chat_with_agent(message, tenant_id, role, history):
                         continue
         else:
             error_msg = f"Error {response.status_code}: {response.text}"
-            history.append({"role": "assistant", "content": error_msg})
+            history = append_to_history(history, "assistant", error_msg)
             yield history

     except requests.exceptions.ConnectionError:
         error_msg = "❌ Connection Error: Could not connect to backend. Please ensure the FastAPI server is running at http://localhost:8000"
-        history.append({"role": "assistant", "content": error_msg})
+        history = append_to_history(history, "assistant", error_msg)
         yield history

     except requests.exceptions.Timeout:
         error_msg = "⏱️ Request Timeout: The backend took longer than 2 minutes to respond. This may happen if:\n- The LLM is processing a complex query\n- Multiple tools (RAG, Web Search) are being used\n- The backend is under heavy load\n\nPlease try again with a simpler query, or check if the backend services (Ollama, MCP servers) are running properly."
-        history.append({"role": "assistant", "content": error_msg})
+        history = append_to_history(history, "assistant", error_msg)
         yield history

     except requests.exceptions.RequestException as e:
         error_msg = f"❌ Request Error: {str(e)}"
-        history.append({"role": "assistant", "content": error_msg})
+        history = append_to_history(history, "assistant", error_msg)
         yield history

     except Exception as e:
         error_msg = f"❌ Unexpected Error: {str(e)}"
-        history.append({"role": "assistant", "content": error_msg})
+        history = append_to_history(history, "assistant", error_msg)
         yield history


@@ -1869,7 +1944,8 @@ with gr.Blocks(
         except Exception as e:
             # Fallback if streaming fails
             error_msg = f"Streaming error: {str(e)}"
-            history.append({"role": "assistant", "content": error_msg})
+            history = convert_history_to_tuples(history)
+            history = append_to_history(history, "assistant", error_msg)
             yield history, message_input_value

     send_button.click(
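For context, a stripped-down sketch of how these helpers are meant to slot into a streaming Gradio handler. The backend call is replaced here by a stand-in token generator (fake_token_stream), and the import assumes app.py is importable from the working directory; neither is part of the commit itself:

import gradio as gr

from app import (
    append_to_history,
    convert_history_to_tuples,
    update_last_assistant_message,
)


def fake_token_stream(message):
    """Stand-in for the backend SSE stream consumed by chat_with_agent."""
    for token in f"Echo: {message}":
        yield token


def demo_chat(message, history):
    # Normalize whatever format the Chatbot hands us, then add the user turn
    history = convert_history_to_tuples(history)
    history = append_to_history(history, "user", message)

    # Open an empty assistant turn and grow it token by token
    assistant_message = ""
    history = append_to_history(history, "assistant", assistant_message)
    for token in fake_token_stream(message):
        assistant_message += token
        history = update_last_assistant_message(history, assistant_message)
        yield history


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # tuple format is the default in Gradio 4.20.0
    msg = gr.Textbox(label="Message")
    msg.submit(demo_chat, [msg, chatbot], chatbot)

if __name__ == "__main__":
    demo.launch()

The patched chat_with_agent follows the same pattern, with the tokens coming from the FastAPI /agent/message/stream endpoint instead of the stand-in generator.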