jdesiree committed on
Commit
9957683
·
verified ·
1 Parent(s): de8bc14

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -29
app.py CHANGED
@@ -1320,11 +1320,10 @@ def remove_loading_animations(chat_history):
1320
  "loading-animation" in str(msg.get("content", ""))
1321
  )]
1322
 
1323
-
1324
  def add_user_message(message, chat_history, conversation_state):
1325
  """
1326
  Add user message with proper state management.
1327
- ✅ FIXED: Returns updated states to Gradio components.
1328
  """
1329
  callback_start = log_step("add_user_message")
1330
 
@@ -1332,36 +1331,26 @@ def add_user_message(message, chat_history, conversation_state):
1332
  log_step("add_user_message", callback_start)
1333
  return "", chat_history, conversation_state
1334
 
1335
- # Get current state from global manager
1336
- current_state = global_state_manager.get_conversation_state()
1337
- chat_history = current_state['chat_history']
1338
- conversation_state = current_state['conversation_state']
1339
-
1340
  # Add to both states
1341
  conversation_state.append({"role": "user", "content": message})
1342
  chat_history.append({"role": "user", "content": message})
1343
 
1344
- # Update global state
1345
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1346
 
1347
  log_step("add_user_message", callback_start)
1348
 
1349
- # ✅ CRITICAL: Return updated states to Gradio
1350
  return "", chat_history, conversation_state
1351
 
1352
 
1353
  def add_loading_animation(chat_history, conversation_state):
1354
  """
1355
  Add loading animation with proper state management.
1356
- ✅ FIXED: Returns updated states to Gradio components.
1357
  """
1358
  callback_start = log_step("add_loading_animation")
1359
 
1360
- # Get current state from global manager
1361
- current_state = global_state_manager.get_conversation_state()
1362
- chat_history = current_state['chat_history']
1363
- conversation_state = current_state['conversation_state']
1364
-
1365
  if not conversation_state:
1366
  log_step("add_loading_animation", callback_start)
1367
  return chat_history, conversation_state
@@ -1378,26 +1367,29 @@ def add_loading_animation(chat_history, conversation_state):
1378
 
1379
  chat_history.append({"role": "assistant", "content": loading_html})
1380
 
1381
- # Update global state
1382
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1383
 
1384
  log_step("add_loading_animation", callback_start)
1385
 
1386
- # ✅ CRITICAL: Return updated states to Gradio
1387
  return chat_history, conversation_state
1388
 
1389
 
1390
  def generate_response(chat_history, conversation_state):
1391
  """
1392
  Generate response using orchestration with proper streaming.
1393
- ✅ FIXED: Loading animation stays until first chunk, then streams properly.
1394
  """
1395
  callback_start = log_step("generate_response")
1396
 
1397
- # Get fresh state from global manager
1398
- current_state = global_state_manager.get_conversation_state()
1399
- chat_history = current_state['chat_history']
1400
- conversation_state = current_state['conversation_state']
 
 
 
1401
 
1402
  if not conversation_state:
1403
  log_step("generate_response", callback_start)
@@ -1415,9 +1407,7 @@ def generate_response(chat_history, conversation_state):
1415
  return chat_history, conversation_state
1416
 
1417
  try:
1418
- # ✅ DON'T remove loading animation yet - let it show during orchestration
1419
-
1420
- # Call orchestration (this takes time)
1421
  orch_start = log_step("orchestrate_turn call")
1422
  raw_response = orchestrate_turn(last_user_message)
1423
  log_step("orchestrate_turn call", orch_start)
@@ -1425,7 +1415,7 @@ def generate_response(chat_history, conversation_state):
1425
  # Stream the processed response
1426
  first_chunk = True
1427
  for chunk in post_processor.process_and_stream_response(raw_response, last_user_message):
1428
- # ✅ Remove loading animation on FIRST chunk only
1429
  if first_chunk:
1430
  chat_history = remove_loading_animations(chat_history)
1431
  first_chunk = False
@@ -1436,7 +1426,7 @@ def generate_response(chat_history, conversation_state):
1436
  else:
1437
  chat_history.append({"role": "assistant", "content": chunk})
1438
 
1439
- # ✅ Yield to update UI during streaming
1440
  yield chat_history, conversation_state
1441
 
1442
  # Add final response to conversation state
@@ -1446,7 +1436,7 @@ def generate_response(chat_history, conversation_state):
1446
  # Update global state with final conversation
1447
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1448
 
1449
- # ✅ Final yield with complete states
1450
  yield chat_history, conversation_state
1451
 
1452
  except Exception as e:
@@ -1487,10 +1477,12 @@ def load_conversation_state():
1487
  current_state = global_state_manager.get_conversation_state()
1488
  log_step("load_conversation_state", callback_start)
1489
 
1490
- # ✅ Extract and return both states
1491
  return current_state['chat_history'], current_state['conversation_state']
1492
 
1493
 
 
 
1494
  # ============================================================================
1495
  # MULTI-PAGE INTERFACE
1496
  # ============================================================================
 
1320
  "loading-animation" in str(msg.get("content", ""))
1321
  )]
1322
 
 
1323
  def add_user_message(message, chat_history, conversation_state):
1324
  """
1325
  Add user message with proper state management.
1326
+ ✅ FIXED: Uses Gradio state instead of overwriting with global manager.
1327
  """
1328
  callback_start = log_step("add_user_message")
1329
 
 
1331
  log_step("add_user_message", callback_start)
1332
  return "", chat_history, conversation_state
1333
 
 
 
 
 
 
1334
  # Add to both states
1335
  conversation_state.append({"role": "user", "content": message})
1336
  chat_history.append({"role": "user", "content": message})
1337
 
1338
+ # Update global state for persistence
1339
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1340
 
1341
  log_step("add_user_message", callback_start)
1342
 
1343
+ # Return updated states to Gradio
1344
  return "", chat_history, conversation_state
1345
 
1346
 
1347
  def add_loading_animation(chat_history, conversation_state):
1348
  """
1349
  Add loading animation with proper state management.
1350
+ ✅ FIXED: Uses Gradio state instead of overwriting.
1351
  """
1352
  callback_start = log_step("add_loading_animation")
1353
 
 
 
 
 
 
1354
  if not conversation_state:
1355
  log_step("add_loading_animation", callback_start)
1356
  return chat_history, conversation_state
 
1367
 
1368
  chat_history.append({"role": "assistant", "content": loading_html})
1369
 
1370
+ # Update global state for persistence
1371
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1372
 
1373
  log_step("add_loading_animation", callback_start)
1374
 
1375
+ # Return updated states to Gradio
1376
  return chat_history, conversation_state
1377
 
1378
 
1379
  def generate_response(chat_history, conversation_state):
1380
  """
1381
  Generate response using orchestration with proper streaming.
1382
+ Uses Gradio state instead of overwriting, only pulls from global on error.
1383
  """
1384
  callback_start = log_step("generate_response")
1385
 
1386
+ # Use the state passed by Gradio
1387
+ # Only pull from global manager if state is empty (error recovery)
1388
+ if not conversation_state:
1389
+ logger.warning("Empty conversation_state in generate_response, attempting recovery from global state")
1390
+ current_state = global_state_manager.get_conversation_state()
1391
+ chat_history = current_state['chat_history']
1392
+ conversation_state = current_state['conversation_state']
1393
 
1394
  if not conversation_state:
1395
  log_step("generate_response", callback_start)
 
1407
  return chat_history, conversation_state
1408
 
1409
  try:
1410
+ # Call orchestration
 
 
1411
  orch_start = log_step("orchestrate_turn call")
1412
  raw_response = orchestrate_turn(last_user_message)
1413
  log_step("orchestrate_turn call", orch_start)
 
1415
  # Stream the processed response
1416
  first_chunk = True
1417
  for chunk in post_processor.process_and_stream_response(raw_response, last_user_message):
1418
+ # Remove loading animation on FIRST chunk only
1419
  if first_chunk:
1420
  chat_history = remove_loading_animations(chat_history)
1421
  first_chunk = False
 
1426
  else:
1427
  chat_history.append({"role": "assistant", "content": chunk})
1428
 
1429
+ # Yield to update UI during streaming
1430
  yield chat_history, conversation_state
1431
 
1432
  # Add final response to conversation state
 
1436
  # Update global state with final conversation
1437
  global_state_manager.update_conversation_state(chat_history, conversation_state)
1438
 
1439
+ # Final yield with complete states
1440
  yield chat_history, conversation_state
1441
 
1442
  except Exception as e:
 
1477
  current_state = global_state_manager.get_conversation_state()
1478
  log_step("load_conversation_state", callback_start)
1479
 
1480
+ # Extract and return both states
1481
  return current_state['chat_history'], current_state['conversation_state']
1482
 
1483
 
1484
+
1485
+
1486
  # ============================================================================
1487
  # MULTI-PAGE INTERFACE
1488
  # ============================================================================