redhairedshanks1 committed on
Commit
cd9e047
·
1 Parent(s): 2eb5a6d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -18
app.py CHANGED
@@ -141,6 +141,25 @@ def handle_file_upload(file_path, session_id):
141
  return file_path, json.dumps(status, indent=2), session_id
142
 
143
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
145
  """
146
  Handle chat messages with streaming updates
@@ -175,12 +194,12 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
175
  }
176
  response_text = f"```json\n{json.dumps(response, indent=2)}\n```"
177
  session_manager.add_message(session_id, "assistant", response_text)
178
- yield history + [[message, response_text]]
179
  return
180
 
181
  try:
182
  # Generate pipeline using Bedrock → Gemini fallback
183
- yield history + [[message, "🤖 Generating pipeline with AI...\n⏳ Trying Bedrock first..."]]
184
 
185
  pipeline = generate_pipeline(
186
  user_input=message,
@@ -201,7 +220,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
201
  response_text = formatted_display + f"\n\n```json\n{json.dumps(pipeline, indent=2)}\n```"
202
 
203
  session_manager.add_message(session_id, "assistant", response_text)
204
- yield history + [[message, response_text]]
205
  return
206
 
207
  except Exception as e:
@@ -213,7 +232,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
213
  }
214
  response_text = f"```json\n{json.dumps(error_response, indent=2)}\n```"
215
  session_manager.add_message(session_id, "assistant", response_text)
216
- yield history + [[message, response_text]]
217
  return
218
 
219
  # ========================
@@ -237,7 +256,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
237
  "steps": []
238
  }
239
  accumulated_response = f"```json\n{json.dumps(initial_status, indent=2)}\n```"
240
- yield history + [[message, accumulated_response]]
241
 
242
  steps_completed = []
243
  final_payload = None
@@ -261,7 +280,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
261
  "executor": event.get("executor", "unknown")
262
  }
263
  accumulated_response = f"```json\n{json.dumps(info_status, indent=2)}\n```"
264
- yield history + [[message, accumulated_response]]
265
 
266
  # Step updates
267
  elif event_type == "step":
@@ -282,7 +301,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
282
  "steps_completed": steps_completed
283
  }
284
  accumulated_response = f"```json\n{json.dumps(progress_status, indent=2)}\n```"
285
- yield history + [[message, accumulated_response]]
286
 
287
  # Final result
288
  elif event_type == "final":
@@ -300,7 +319,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
300
  final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
301
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
302
  session_manager.add_message(session_id, "assistant", final_response)
303
- yield history + [[message, final_response]]
304
  return
305
 
306
  # Process final result
@@ -336,7 +355,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
336
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
337
 
338
  session_manager.add_message(session_id, "assistant", final_response)
339
- yield history + [[message, final_response]]
340
  return
341
 
342
  except Exception as e:
@@ -349,7 +368,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
349
  final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
350
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
351
  session_manager.add_message(session_id, "assistant", final_response)
352
- yield history + [[message, final_response]]
353
  return
354
 
355
  # REJECT - Cancel the pipeline
@@ -365,7 +384,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
365
  }
366
  response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
367
  session_manager.add_message(session_id, "assistant", response)
368
- yield history + [[message, response]]
369
  return
370
 
371
  # EDIT - Request modifications
@@ -385,7 +404,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
385
  }
386
  response = f"```json\n{json.dumps(edit_help, indent=2)}\n```"
387
  session_manager.add_message(session_id, "assistant", response)
388
- yield history + [[message, response]]
389
  return
390
 
391
  # Try to modify pipeline based on user input
@@ -410,7 +429,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
410
  formatted = format_pipeline_for_display(new_pipeline)
411
  response = formatted + f"\n\n```json\n{json.dumps(new_pipeline, indent=2)}\n```"
412
  session_manager.add_message(session_id, "assistant", response)
413
- yield history + [[message, response]]
414
  return
415
 
416
  except Exception as e:
@@ -422,7 +441,7 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
422
  }
423
  response = f"```json\n{json.dumps(error_response, indent=2)}\n```"
424
  session_manager.add_message(session_id, "assistant", response)
425
- yield history + [[message, response]]
426
  return
427
 
428
  # Default waiting message
@@ -433,13 +452,13 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, fil
433
  }
434
  response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
435
  session_manager.add_message(session_id, "assistant", response)
436
- yield history + [[message, response]]
437
  return
438
 
439
  # Default fallback
440
  response = json.dumps({"status": "ready", "message": "Ready for your next instruction"}, indent=2)
441
  session_manager.add_message(session_id, "assistant", response)
442
- yield history + [[message, response]]
443
 
444
 
445
  # ========================
@@ -462,8 +481,8 @@ with gr.Blocks(title="MasterLLM v2.0 - AI Pipeline Orchestrator") as demo:
462
 
463
  with gr.Row():
464
  with gr.Column(scale=3):
465
- # Chat interface (simplified for older Gradio)
466
- chatbot = gr.Chatbot(label="Chat")
467
 
468
  # Text input
469
  msg = gr.Textbox(
 
141
  return file_path, json.dumps(status, indent=2), session_id
142
 
143
 
144
def format_chat_history(history, new_user_msg, new_assistant_msg):
    """
    Build a Gradio 5.x "messages"-format chat history.

    Gradio's ``gr.Chatbot(type="messages")`` expects (and supplies) history as
    a flat list of dicts: ``[{"role": "user", "content": ...},
    {"role": "assistant", "content": ...}]``.

    BUG FIX: with ``type="messages"`` the ``history`` argument handed to the
    callback is ALREADY a list of role/content dicts, not ``[user, bot]``
    pairs. The original unconditional ``for user_msg, bot_msg in history:``
    would unpack each dict's two keys, emitting the literal strings
    ``"role"``/``"content"`` as message content. We now detect the entry
    shape and handle both the dict format and the legacy pair format.

    Args:
        history: Existing chat history — either a list of role/content dicts
            (Gradio 5.x messages format) or a list of
            ``[user_msg, bot_msg]`` pairs (legacy tuples format).
        new_user_msg: The user message to append after the history.
        new_assistant_msg: The assistant reply to append last.

    Returns:
        list[dict]: The full history in messages format, ending with the new
        user/assistant exchange.
    """
    messages = []

    # Normalize existing history, whatever shape Gradio handed us.
    for entry in history:
        if isinstance(entry, dict):
            # Already messages-format; copy role/content through unchanged.
            messages.append({"role": entry["role"], "content": entry["content"]})
        else:
            # Legacy [user, bot] pair — expand into two messages.
            user_msg, bot_msg = entry
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})

    # Append the new exchange.
    messages.append({"role": "user", "content": new_user_msg})
    messages.append({"role": "assistant", "content": new_assistant_msg})

    return messages
161
+
162
+
163
  def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
164
  """
165
  Handle chat messages with streaming updates
 
194
  }
195
  response_text = f"```json\n{json.dumps(response, indent=2)}\n```"
196
  session_manager.add_message(session_id, "assistant", response_text)
197
+ yield format_chat_history(history, message, response_text)
198
  return
199
 
200
  try:
201
  # Generate pipeline using Bedrock → Gemini fallback
202
+ yield format_chat_history(history, message, "🤖 Generating pipeline with AI...\n⏳ Trying Bedrock first...")
203
 
204
  pipeline = generate_pipeline(
205
  user_input=message,
 
220
  response_text = formatted_display + f"\n\n```json\n{json.dumps(pipeline, indent=2)}\n```"
221
 
222
  session_manager.add_message(session_id, "assistant", response_text)
223
+ yield format_chat_history(history, message, response_text)
224
  return
225
 
226
  except Exception as e:
 
232
  }
233
  response_text = f"```json\n{json.dumps(error_response, indent=2)}\n```"
234
  session_manager.add_message(session_id, "assistant", response_text)
235
+ yield format_chat_history(history, message, response_text)
236
  return
237
 
238
  # ========================
 
256
  "steps": []
257
  }
258
  accumulated_response = f"```json\n{json.dumps(initial_status, indent=2)}\n```"
259
+ yield format_chat_history(history, message, accumulated_response)
260
 
261
  steps_completed = []
262
  final_payload = None
 
280
  "executor": event.get("executor", "unknown")
281
  }
282
  accumulated_response = f"```json\n{json.dumps(info_status, indent=2)}\n```"
283
+ yield format_chat_history(history, message, accumulated_response)
284
 
285
  # Step updates
286
  elif event_type == "step":
 
301
  "steps_completed": steps_completed
302
  }
303
  accumulated_response = f"```json\n{json.dumps(progress_status, indent=2)}\n```"
304
+ yield format_chat_history(history, message, accumulated_response)
305
 
306
  # Final result
307
  elif event_type == "final":
 
319
  final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
320
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
321
  session_manager.add_message(session_id, "assistant", final_response)
322
+ yield format_chat_history(history, message, final_response)
323
  return
324
 
325
  # Process final result
 
355
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
356
 
357
  session_manager.add_message(session_id, "assistant", final_response)
358
+ yield format_chat_history(history, message, final_response)
359
  return
360
 
361
  except Exception as e:
 
368
  final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
369
  session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
370
  session_manager.add_message(session_id, "assistant", final_response)
371
+ yield format_chat_history(history, message, final_response)
372
  return
373
 
374
  # REJECT - Cancel the pipeline
 
384
  }
385
  response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
386
  session_manager.add_message(session_id, "assistant", response)
387
+ yield format_chat_history(history, message, response)
388
  return
389
 
390
  # EDIT - Request modifications
 
404
  }
405
  response = f"```json\n{json.dumps(edit_help, indent=2)}\n```"
406
  session_manager.add_message(session_id, "assistant", response)
407
+ yield format_chat_history(history, message, response)
408
  return
409
 
410
  # Try to modify pipeline based on user input
 
429
  formatted = format_pipeline_for_display(new_pipeline)
430
  response = formatted + f"\n\n```json\n{json.dumps(new_pipeline, indent=2)}\n```"
431
  session_manager.add_message(session_id, "assistant", response)
432
+ yield format_chat_history(history, message, response)
433
  return
434
 
435
  except Exception as e:
 
441
  }
442
  response = f"```json\n{json.dumps(error_response, indent=2)}\n```"
443
  session_manager.add_message(session_id, "assistant", response)
444
+ yield format_chat_history(history, message, response)
445
  return
446
 
447
  # Default waiting message
 
452
  }
453
  response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
454
  session_manager.add_message(session_id, "assistant", response)
455
+ yield format_chat_history(history, message, response)
456
  return
457
 
458
  # Default fallback
459
  response = json.dumps({"status": "ready", "message": "Ready for your next instruction"}, indent=2)
460
  session_manager.add_message(session_id, "assistant", response)
461
+ yield format_chat_history(history, message, response)
462
 
463
 
464
  # ========================
 
481
 
482
  with gr.Row():
483
  with gr.Column(scale=3):
484
+ # Chat interface (Gradio 5.x with messages format)
485
+ chatbot = gr.Chatbot(label="Chat", type="messages")
486
 
487
  # Text input
488
  msg = gr.Textbox(