Commit 9d6015f (parent d36787a)
Update app.py

app.py CHANGED

@@ -1,3 +1,646 @@
+# # app.py - MasterLLM v2.0 with Bedrock Fallback System
+# """
+# MasterLLM Pipeline Orchestrator v2.0
+# - Bedrock (priority) + Gemini (fallback) for pipeline generation
+# - Bedrock LangChain (priority) + CrewAI (fallback) for execution
+# - MongoDB session management
+# - Complete REST API
+# - Gradio UI with fancy displays
+# """
+# import os
+# import json
+# import uuid
+# from datetime import datetime
+# from typing import List, Optional
+
+# import gradio as gr
+# from fastapi import FastAPI
+# from fastapi.middleware.cors import CORSMiddleware
+# from contextlib import asynccontextmanager
+# import asyncio
+
+# # Import our new services
+# from services.pipeline_generator import generate_pipeline, format_pipeline_for_display
+# from services.pipeline_executor import execute_pipeline_streaming
+# from services.session_manager import session_manager
+# from api_routes import router as api_router
+
+
+# # ========================
+# # BACKGROUND CLEANUP TASK
+# # ========================
+
+# async def periodic_cleanup():
+#     """Cleanup old sessions every hour"""
+#     while True:
+#         await asyncio.sleep(3600)  # Run every hour
+#         try:
+#             removed = session_manager.cleanup_old_sessions(max_age_hours=24)
+#             if removed > 0:
+#                 print(f"🧹 Cleaned up {removed} inactive sessions")
+#         except Exception as e:
+#             print(f"⚠️ Cleanup error: {e}")
+
+
+# @asynccontextmanager
+# async def lifespan(app: FastAPI):
+#     """Manage application lifecycle"""
+#     # Startup
+#     print("🚀 Starting MasterLLM v2.0...")
+#     task = asyncio.create_task(periodic_cleanup())
+#     yield
+#     # Shutdown
+#     task.cancel()
+#     session_manager.close()
+#     print("🛑 MasterLLM shut down gracefully")
+
+
+# # ========================
+# # FASTAPI APP
+# # ========================
+
+# app = FastAPI(
+#     title="MasterLLM v2.0 - AI Pipeline Orchestrator",
+#     description="Bedrock + Gemini fallback system with MongoDB sessions",
+#     version="2.0.0",
+#     lifespan=lifespan
+# )
+
+# # CORS Configuration
+# app.add_middleware(
+#     CORSMiddleware,
+#     allow_origins=[os.getenv("FRONTEND_ORIGIN", "http://localhost:3000")],
+#     allow_credentials=True,
+#     allow_methods=["*"],
+#     allow_headers=["*"],
+# )
+
+# # Mount API routes
+# app.include_router(api_router)
+
+
+# # ========================
+# # CONVERSATION STATE
+# # ========================
+
+# class ConversationState:
+#     INITIAL = "initial"
+#     PIPELINE_PROPOSED = "pipeline_proposed"
+#     PIPELINE_APPROVED = "pipeline_approved"
+#     EXECUTING = "executing"
+#     COMPLETED = "completed"
+#     ERROR = "error"
+
+
+# # ========================
+# # GRADIO UI HANDLERS
+# # ========================
+
+# def create_new_session():
+#     """Create a new session"""
+#     return session_manager.create_session()
+
+
+# def handle_file_upload(file_path, session_id):
+#     """Handle file upload"""
+#     if not file_path:
+#         return None, json.dumps({
+#             "status": "error",
+#             "message": "No file uploaded"
+#         }, indent=2), session_id
+
+#     if not session_id:
+#         session_id = create_new_session()
+
+#     file_name = os.path.basename(file_path)
+
+#     # Update session
+#     session_manager.update_session(session_id, {
+#         "current_file": file_path,
+#         "state": ConversationState.INITIAL
+#     })
+
+#     # Add system message
+#     session_manager.add_message(
+#         session_id,
+#         "system",
+#         f"File uploaded: {file_name}"
+#     )
+
+#     status = {
+#         "status": "success",
+#         "message": f"File '{file_name}' uploaded successfully",
+#         "file_info": {
+#             "name": file_name,
+#             "path": file_path,
+#             "size_bytes": os.path.getsize(file_path) if os.path.exists(file_path) else 0
+#         },
+#         "next_action": "💬 Now tell me what you'd like to do with this document"
+#     }
+
+#     return file_path, json.dumps(status, indent=2), session_id
+
+
+# def format_chat_history(history, new_user_msg, new_assistant_msg):
+#     """
+#     Convert chat history to new Gradio format (list of dicts with role/content)
+#     Handles both old format (tuples) and new format (dicts)
+#     """
+#     messages = []
+
+#     # Handle existing history - could be in old or new format
+#     if history:
+#         # Check if already in new format (list of dicts with 'role' and 'content')
+#         if isinstance(history[0], dict) and 'role' in history[0]:
+#             # Already in new format, just copy it
+#             messages = list(history)
+#         else:
+#             # Old format (list of tuples), convert it
+#             for item in history:
+#                 if isinstance(item, (list, tuple)) and len(item) == 2:
+#                     user_msg, bot_msg = item
+#                     messages.append({"role": "user", "content": user_msg})
+#                     messages.append({"role": "assistant", "content": bot_msg})
+
+#     # Add new messages
+#     messages.append({"role": "user", "content": new_user_msg})
+#     messages.append({"role": "assistant", "content": new_assistant_msg})
+
+#     return messages
+
+
+# def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
+#     """
+#     Handle chat messages with streaming updates
+#     Uses Bedrock (priority) → Gemini (fallback) for both generation and execution
+#     """
+#     # Get or create session
+#     session = session_manager.get_session(session_id)
+#     if not session:
+#         session_id = create_new_session()
+#         session = session_manager.get_session(session_id)
+
+#     # Update file path if provided
+#     if file_path:
+#         session_manager.update_session(session_id, {"current_file": file_path})
+#         session = session_manager.get_session(session_id)
+
+#     # Add user message to session
+#     session_manager.add_message(session_id, "user", message)
+
+#     current_state = session.get("state", ConversationState.INITIAL)
+
+#     # ========================
+#     # STATE: INITIAL - Generate Pipeline
+#     # ========================
+#     if current_state == ConversationState.INITIAL:
+#         # Check if file is uploaded
+#         if not session.get("current_file"):
+#             response = {
+#                 "status": "error",
+#                 "message": "Please upload a document first",
+#                 "action": "📄 Click 'Upload Document' to begin"
+#             }
+#             response_text = f"```json\n{json.dumps(response, indent=2)}\n```"
+#             session_manager.add_message(session_id, "assistant", response_text)
+#             yield format_chat_history(history, message, response_text)
+#             return
+
+#         try:
+#             # Generate pipeline using Bedrock → Gemini fallback
+#             yield format_chat_history(history, message, "🤖 Generating pipeline with AI...\n⏳ Trying Bedrock first...")
+
+#             pipeline = generate_pipeline(
+#                 user_input=message,
+#                 file_path=session.get("current_file"),
+#                 prefer_bedrock=True
+#             )
+
+#             # Save proposed pipeline to session
+#             session_manager.update_session(session_id, {
+#                 "proposed_pipeline": pipeline,
+#                 "state": ConversationState.PIPELINE_PROPOSED
+#             })
+
+#             # Format for display
+#             formatted_display = format_pipeline_for_display(pipeline)
+
+#             # Create response with both fancy display and JSON
+#             response_text = formatted_display + f"\n\n```json\n{json.dumps(pipeline, indent=2)}\n```"
+
+#             session_manager.add_message(session_id, "assistant", response_text)
+#             yield format_chat_history(history, message, response_text)
+#             return
+
+#         except Exception as e:
+#             error_response = {
+#                 "status": "error",
+#                 "message": "Failed to generate pipeline",
+#                 "error": str(e),
+#                 "action": "Please try rephrasing your request"
+#             }
+#             response_text = f"```json\n{json.dumps(error_response, indent=2)}\n```"
+#             session_manager.add_message(session_id, "assistant", response_text)
+#             yield format_chat_history(history, message, response_text)
+#             return
+
+#     # ========================
+#     # STATE: PIPELINE_PROPOSED - Handle Approval/Rejection
+#     # ========================
+#     elif current_state == ConversationState.PIPELINE_PROPOSED:
+#         user_input = message.lower().strip()
+
+#         # APPROVE - Execute the pipeline
+#         if "approve" in user_input or "yes" in user_input:
+#             session_manager.update_session(session_id, {"state": ConversationState.EXECUTING})
+
+#             plan = session.get("proposed_pipeline", {})
+
+#             # Initial status
+#             initial_status = {
+#                 "status": "executing",
+#                 "message": "🚀 Starting pipeline execution...",
+#                 "pipeline": plan.get("pipeline_name", "unknown"),
+#                 "executor": "Attempting Bedrock LangChain first",
+#                 "steps": []
+#             }
+#             accumulated_response = f"```json\n{json.dumps(initial_status, indent=2)}\n```"
+#             yield format_chat_history(history, message, accumulated_response)
+
+#             steps_completed = []
+#             final_payload = None
+#             executor_used = "unknown"
+
+#             try:
+#                 # Execute pipeline with Bedrock → CrewAI fallback
+#                 for event in execute_pipeline_streaming(
+#                     pipeline=plan,
+#                     file_path=session.get("current_file"),
+#                     session_id=session_id,
+#                     prefer_bedrock=True
+#                 ):
+#                     event_type = event.get("type")
+
+#                     # Info events (fallback notifications, etc.)
+#                     if event_type == "info":
+#                         info_status = {
+#                             "status": "info",
+#                             "message": event.get("message"),
+#                             "executor": event.get("executor", "unknown")
+#                         }
+#                         accumulated_response = f"```json\n{json.dumps(info_status, indent=2)}\n```"
+#                         yield format_chat_history(history, message, accumulated_response)
+
+#                     # Step updates
+#                     elif event_type == "step":
+#                         step_info = {
+#                             "step": event.get("step", 0),
+#                             "tool": event.get("tool", "processing"),
+#                             "status": event.get("status", "running"),
+#                             "executor": event.get("executor", "unknown")
+#                         }
+
+#                         # Add observation if available (tool output)
+#                         if "observation" in event:
+#                             step_info["observation"] = event.get("observation")
+
+#                         # Add tool input if available
+#                         if "input" in event:
+#                             step_info["input"] = event.get("input")
+
+#                         steps_completed.append(step_info)
+#                         executor_used = event.get("executor", executor_used)
+
+#                         # Create more informative status message
+#                         status_message = f"🔄 Step {event.get('step', 0)}: {event.get('tool', 'processing')}"
+#                         if event.get('status') == 'completed' and 'observation' in event:
+#                             obs_preview = str(event.get('observation'))[:100]
+#                             status_message += f" ✅\n   Output: {obs_preview}..."
+#                         elif event.get('status') == 'executing':
+#                             status_message += " ⏳"
+
+#                         progress_status = {
+#                             "status": "executing",
+#                             "message": status_message,
+#                             "pipeline": plan.get("pipeline_name", ""),
+#                             "executor": executor_used,
+#                             "current_step": step_info,
+#                             "steps_completed": steps_completed
+#                         }
+#                         accumulated_response = f"```json\n{json.dumps(progress_status, indent=2)}\n```"
+#                         yield format_chat_history(history, message, accumulated_response)
+
+
+#                     # Final result
+#                     elif event_type == "final":
+#                         final_payload = event.get("data")
+#                         executor_used = event.get("executor", executor_used)
+
+#                     # Error
+#                     elif event_type == "error":
+#                         error_result = {
+#                             "status": "failed",
+#                             "error": event.get("error"),
+#                             "steps_completed": steps_completed,
+#                             "executor": event.get("executor", "unknown")
+#                         }
+#                         final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
+#                         session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
+#                         session_manager.add_message(session_id, "assistant", final_response)
+#                         yield format_chat_history(history, message, final_response)
+#                         return
+
+#                 # Process final result
+#                 if final_payload:
+#                     session_manager.update_session(session_id, {
+#                         "pipeline_result": final_payload,
+#                         "state": ConversationState.INITIAL
+#                     })
+
+#                     # Save execution to MongoDB
+#                     session_manager.save_pipeline_execution(
+#                         session_id=session_id,
+#                         pipeline=plan,
+#                         result=final_payload,
+#                         file_path=session.get("current_file"),
+#                         executor=executor_used
+#                     )
+
+#                     # Format final response
+#                     final_display = {
+#                         "status": "completed",
+#                         "executor": executor_used,
+#                         "pipeline": plan.get("pipeline_name"),
+#                         "result": final_payload,
+#                         "summary": {
+#                             "total_steps": len(steps_completed),
+#                             "completed_successfully": len([s for s in steps_completed if s.get("status") == "completed"])
+#                         }
+#                     }
+#                     final_response = f"```json\n{json.dumps(final_display, indent=2)}\n```"
+#                 else:
+#                     final_response = f"```json\n{json.dumps({'status': 'completed', 'steps': steps_completed, 'executor': executor_used}, indent=2)}\n```"
+#                     session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
+
+#                 session_manager.add_message(session_id, "assistant", final_response)
+#                 yield format_chat_history(history, message, final_response)
+#                 return
+
+#             except Exception as e:
+#                 error_result = {
+#                     "error": str(e),
+#                     "status": "failed",
+#                     "message": "Pipeline execution failed",
+#                     "steps_completed": steps_completed
+#                 }
+#                 final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
+#                 session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
+#                 session_manager.add_message(session_id, "assistant", final_response)
+#                 yield format_chat_history(history, message, final_response)
+#                 return
+
+#         # REJECT - Cancel the pipeline
+#         elif "reject" in user_input or "no" in user_input:
+#             session_manager.update_session(session_id, {
+#                 "state": ConversationState.INITIAL,
+#                 "proposed_pipeline": None
+#             })
+#             response_data = {
+#                 "status": "rejected",
+#                 "message": "Pipeline rejected by user",
+#                 "action": "💬 Please provide a new instruction"
+#             }
+#             response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
+#             session_manager.add_message(session_id, "assistant", response)
+#             yield format_chat_history(history, message, response)
+#             return
+
+#         # EDIT - Request modifications
+#         elif "edit" in user_input or "modify" in user_input:
+#             current_pipeline = session.get("proposed_pipeline", {})
+#             edit_help = {
+#                 "status": "edit_mode",
+#                 "message": "To modify the plan, describe your changes",
+#                 "current_plan": current_pipeline,
+#                 "examples": [
+#                     "Add summarization at the end",
+#                     "Remove table extraction",
+#                     "Only process pages 1-3",
+#                     "Translate to French instead of Spanish"
+#                 ],
+#                 "action": "Describe your changes, or say 'approve' to run as-is"
+#             }
+#             response = f"```json\n{json.dumps(edit_help, indent=2)}\n```"
+#             session_manager.add_message(session_id, "assistant", response)
+#             yield format_chat_history(history, message, response)
+#             return
+
+#         # Try to modify pipeline based on user input
+#         else:
+#             if len(message.strip()) > 5:
+#                 try:
+#                     original_plan = session.get("proposed_pipeline", {})
+#                     edit_context = f"Original: {original_plan.get('pipeline_name')}. User wants: {message}"
+
+#                     # Generate new pipeline with modification
+#                     new_pipeline = generate_pipeline(
+#                         user_input=edit_context,
+#                         file_path=session.get("current_file"),
+#                         prefer_bedrock=True
+#                     )
+
+#                     session_manager.update_session(session_id, {
+#                         "proposed_pipeline": new_pipeline,
+#                         "state": ConversationState.PIPELINE_PROPOSED
+#                     })
+
+#                     formatted = format_pipeline_for_display(new_pipeline)
+#                     response = formatted + f"\n\n```json\n{json.dumps(new_pipeline, indent=2)}\n```"
+#                     session_manager.add_message(session_id, "assistant", response)
+#                     yield format_chat_history(history, message, response)
+#                     return
+
+#                 except Exception as e:
+#                     error_response = {
+#                         "status": "edit_failed",
+#                         "error": str(e),
+#                         "message": "Could not modify the plan",
+#                         "action": "Try 'approve' to run as-is, or 'reject' to start over"
+#                     }
+#                     response = f"```json\n{json.dumps(error_response, indent=2)}\n```"
+#                     session_manager.add_message(session_id, "assistant", response)
+#                     yield format_chat_history(history, message, response)
+#                     return
+
+#             # Default waiting message
+#             response_data = {
+#                 "status": "waiting_for_confirmation",
+#                 "message": "Please type 'approve', 'reject', or describe changes",
+#                 "hint": "You can also say 'edit' for modification hints"
+#             }
+#             response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
+#             session_manager.add_message(session_id, "assistant", response)
+#             yield format_chat_history(history, message, response)
+#             return
+
+#     # Default fallback
+#     response = json.dumps({"status": "ready", "message": "Ready for your next instruction"}, indent=2)
+#     session_manager.add_message(session_id, "assistant", response)
+#     yield format_chat_history(history, message, response)
+
+
+# # ========================
+# # GRADIO UI
+# # ========================
+
+# # Simple Blocks initialization for HF Spaces compatibility (older Gradio version)
+# with gr.Blocks(title="MasterLLM v2.0 - AI Pipeline Orchestrator") as demo:
+#     gr.Markdown("""
+#     # 🤖 MasterLLM v2.0 - AI Pipeline Orchestrator
+
+#     **🚀 Bedrock Priority** with Gemini Fallback | **💾 MongoDB Sessions** | **📡 Complete REST API**
+
+#     Upload a document, describe what you want, and watch AI orchestrate the perfect pipeline!
+#     """)
+
+#     # State management
+#     session_id_state = gr.State(value=create_new_session())
+#     file_state = gr.State(value=None)
+
+#     with gr.Row():
+#         with gr.Column(scale=3):
+#             # Chat interface - Gradio auto-detects format from data structure
+#             chatbot = gr.Chatbot(label="Chat")
+
+#             # Text input
+#             msg = gr.Textbox(
+#                 placeholder="💬 Type your instruction... (e.g., 'extract text from pages 1-5 and summarize')",
+#                 label="Your Message",
+#                 lines=2,
+#                 max_lines=4,
+#             )
+
+#             with gr.Row():
+#                 submit_btn = gr.Button("🚀 Send", variant="primary", scale=2)
+#                 clear_btn = gr.Button("🗑️ Clear Chat", scale=1)
+
+#         with gr.Column(scale=1):
+#             # File upload section
+#             gr.Markdown("### 📄 Upload Document")
+#             file_upload = gr.File(
+#                 label="PDF or Image",
+#                 file_types=[".pdf", ".png", ".jpg", ".jpeg", ".gif", ".bmp"],
+#                 type="filepath",
+#             )
+
+#             upload_status = gr.Textbox(
+#                 label="📋 Upload Status",
+#                 interactive=False,
+#                 lines=10,
+#                 max_lines=15,
+#             )
+
+#             # Session info
+#             gr.Markdown("### 📊 Session Info")
+#             session_display = gr.Textbox(
+#                 label="Session ID",
+#                 interactive=False,
+#                 value=lambda: session_id_state.value[:8] + "...",
+#             )
+
+#             # Examples
+#             gr.Markdown("### 💡 Example Pipelines")
+#             gr.Examples(
+#                 examples=[
+#                     "extract text from pages 1-5",
+#                     "extract text and summarize",
+#                     "extract text, tables, and translate to Spanish",
+#                     "get tables from pages 2-4 and summarize",
+#                     "text-classify-ner from entire document",
+#                     "describe images and summarize findings",
+#                     "extract text, detect signatures and stamps",
+#                 ],
+#                 inputs=msg,
+#             )
+
+#             # System info
+#             gr.Markdown("""
+#             ### ℹ️ System Features
+#             - ✅ **Bedrock** (Claude 3.5 Sonnet) priority
+#             - ✅ **Gemini** (gemini-2.0-flash) fallback
+#             - ✅ **MongoDB** session persistence
+#             - ✅ **Streaming** real-time updates
+#             - ✅ **Component-level** JSON output
+#             - ✅ **REST API** for integration
+
+#             ### 🚀 Pipeline Flow:
+#             1. **Upload** your document
+#             2. **Describe** what you want
+#             3. **Review** AI-generated pipeline
+#             4. **Approve** to execute
+#             5. **Watch** streaming updates
+#             6. **Get** complete JSON results
+#             """)
+
+#     # Event handlers
+#     file_upload.upload(
+#         fn=handle_file_upload,
+#         inputs=[file_upload, session_id_state],
+#         outputs=[file_state, upload_status, session_id_state],
+#     )
+
+#     msg.submit(
+#         fn=chatbot_response_streaming,
+#         inputs=[msg, chatbot, session_id_state, file_state],
+#         outputs=[chatbot],
+#     ).then(
+#         lambda: "",
+#         outputs=msg,
+#     )
+
+#     submit_btn.click(
+#         fn=chatbot_response_streaming,
+#         inputs=[msg, chatbot, session_id_state, file_state],
+#         outputs=[chatbot],
+#     ).then(
+#         lambda: "",
+#         outputs=msg,
+#     )
+
+#     clear_btn.click(
+#         fn=lambda: ([], create_new_session(), None, None, "", ""),
+#         outputs=[chatbot, session_id_state, file_state, file_upload, msg, upload_status],
+#     )
+
+#     # Mount Gradio on FastAPI
+#     app = gr.mount_gradio_app(app, demo, path="/")
+
+
+# # ========================
+# # LAUNCH
+# # ========================
+
+# if __name__ == "__main__":
+#     import uvicorn
+#     port = int(os.getenv("PORT", 7860))
+#     print(f"""
+#     ╔══════════════════════════════════════════════════════════╗
+#     ║                                                          ║
+#     ║   🚀 MasterLLM v2.0 Starting...                          ║
+#     ║                                                          ║
+#     ║   🌐 Gradio UI:  http://localhost:{port}                 ║
+#     ║   📡 REST API:   http://localhost:{port}/api/v1          ║
+#     ║   📚 API Docs:   http://localhost:{port}/docs            ║
+#     ║                                                          ║
+#     ║   🚀 Bedrock: Priority (Claude 3.5 Sonnet)               ║
+#     ║   🔄 Gemini:  Fallback (gemini-2.0-flash)                ║
+#     ║   💾 MongoDB: Session management                         ║
+#     ║                                                          ║
+#     ╚══════════════════════════════════════════════════════════╝
+#     """)
+
+#     uvicorn.run(app, host="0.0.0.0", port=port)
+
 # app.py - MasterLLM v2.0 with Bedrock Fallback System
 """
 MasterLLM Pipeline Orchestrator v2.0
@@ -23,7 +666,9 @@ import asyncio
 from services.pipeline_generator import generate_pipeline, format_pipeline_for_display
 from services.pipeline_executor import execute_pipeline_streaming
 from services.session_manager import session_manager
+from services.intent_classifier import intent_classifier
 from api_routes import router as api_router
+from api_routes_v2 import router as api_router_v2
 
 
 # ========================
@@ -76,7 +721,8 @@ app.add_middleware(
 )
 
 # Mount API routes
-app.include_router(api_router)
+app.include_router(api_router)  # V1 API (legacy)
+app.include_router(api_router_v2)  # V2 API (enhanced with intent classification)
 
 
 # ========================
@@ -172,7 +818,7 @@ def format_chat_history(history, new_user_msg, new_assistant_msg):
 def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
     """
     Handle chat messages with streaming updates
-    Uses Bedrock (priority) → Gemini (fallback) for both generation and execution
+    Uses intent classification + Bedrock (priority) → Gemini (fallback) for both generation and execution
     """
     # Get or create session
     session = session_manager.get_session(session_id)
@@ -191,24 +837,50 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
     current_state = session.get("state", ConversationState.INITIAL)
 
     # ========================
-    # STATE: INITIAL - Generate Pipeline
+    # CLASSIFY USER INTENT
+    # ========================
+    intent_data = intent_classifier.classify_intent(message)
+
+    # ========================
+    # HANDLE CASUAL CHAT & QUESTIONS
+    # ========================
+    if intent_data["intent"] in ["casual_chat", "question"] and current_state == ConversationState.INITIAL:
+        friendly_response = intent_classifier.get_friendly_response(intent_data["intent"], message)
+        session_manager.add_message(session_id, "assistant", friendly_response)
+        yield format_chat_history(history, message, friendly_response)
+        return
+
+    # ========================
+    # HANDLE UNCLEAR INTENT
+    # ========================
+    if intent_data["intent"] == "unclear" and current_state == ConversationState.INITIAL:
+        friendly_response = intent_classifier.get_friendly_response("unclear", message)
+        session_manager.add_message(session_id, "assistant", friendly_response)
+        yield format_chat_history(history, message, friendly_response)
+        return
+
+    # ========================
+    # STATE: INITIAL - Generate Pipeline ONLY if intent requires it
     # ========================
     if current_state == ConversationState.INITIAL:
+        # Only generate pipeline if user explicitly requested it
+        if not intent_data.get("requires_pipeline", False):
+            # Not a pipeline request - give friendly response
+            friendly_response = "I'm here to help process documents! Please tell me what you'd like to do with your document.\n\nFor example:\n- 'extract text and summarize'\n- 'get tables from pages 2-5'\n- 'translate to Spanish'\n\nType 'help' to see all capabilities!"
+            session_manager.add_message(session_id, "assistant", friendly_response)
+            yield format_chat_history(history, message, friendly_response)
+            return
+
         # Check if file is uploaded
         if not session.get("current_file"):
-            response = {
-                "status": "error",
-                "message": "Please upload a document first",
-                "action": "📄 Click 'Upload Document' to begin"
-            }
-            response_text = f"```json\n{json.dumps(response, indent=2)}\n```"
+            response_text = "📄 Please upload a document first before I can process it!\n\nClick the 'Upload Document' button to get started."
            session_manager.add_message(session_id, "assistant", response_text)
             yield format_chat_history(history, message, response_text)
             return
 
         try:
             # Generate pipeline using Bedrock → Gemini fallback
-            yield format_chat_history(history, message, "🤖 Generating pipeline with AI...\n⏳ Trying Bedrock first...")
+            yield format_chat_history(history, message, "🤖 Analyzing your request and creating a pipeline...\n⏳ This will take just a moment...")
 
             pipeline = generate_pipeline(
                 user_input=message,
@@ -222,27 +894,34 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
                 "state": ConversationState.PIPELINE_PROPOSED
             })
 
-            # Format for display
-            formatted_display = format_pipeline_for_display(pipeline)
+            # Create user-friendly display
+            pipeline_name = pipeline.get("pipeline_name", "Document Processing")
+            steps_list = pipeline.get("pipeline_steps", [])
+            steps_summary = "\n".join([f"  {i+1}. **{step.get('tool', 'Unknown')}**" for i, step in enumerate(steps_list)])
 
-            # Create response with both fancy display and JSON
-            response_text = formatted_display + f"\n\n```json\n{json.dumps(pipeline, indent=2)}\n```"
+            friendly_display = f"""🎯 **Pipeline Created: {pipeline_name}**
+
+Here's what I'll do:
+{steps_summary}
+
+**Ready to proceed?**
+- Type **'approve'** or **'yes'** to execute
+- Type **'reject'** or **'no'** to cancel
+- Describe changes to modify the plan"""
+
+            # Add technical details in collapsible format
+            response_text = friendly_display + f"\n\n<details>\n<summary>🔧 Technical Details (for developers)</summary>\n\n```json\n{json.dumps(pipeline, indent=2)}\n```\n</details>"
 
             session_manager.add_message(session_id, "assistant", response_text)
             yield format_chat_history(history, message, response_text)
             return
 
         except Exception as e:
-            error_response = {
-                "status": "error",
-                "message": "Failed to generate pipeline",
-                "error": str(e),
-                "action": "Please try rephrasing your request"
-            }
-            response_text = f"```json\n{json.dumps(error_response, indent=2)}\n```"
-            session_manager.add_message(session_id, "assistant", response_text)
-            yield format_chat_history(history, message, response_text)
+            error_response = f"❌ **Oops!** I encountered an error while creating the pipeline:\n\n{str(e)}\n\nPlease try rephrasing your request or type 'help' for examples."
+            session_manager.add_message(session_id, "assistant", error_response)
+            yield format_chat_history(history, message, error_response)
             return
 
     # ========================
     # STATE: PIPELINE_PROPOSED - Handle Approval/Rejection
@@ -256,20 +935,14 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
 
             plan = session.get("proposed_pipeline", {})
 
-            # Initial status
-            initial_status = {
-                "status": "executing",
-                "message": "🚀 Starting pipeline execution...",
-                "pipeline": plan.get("pipeline_name", "unknown"),
-                "executor": "Attempting Bedrock LangChain first",
-                "steps": []
-            }
-            accumulated_response = f"```json\n{json.dumps(initial_status, indent=2)}\n```"
-            yield format_chat_history(history, message, accumulated_response)
+            # Initial status - User-friendly
+            initial_message = f"✅ **Approved!** Starting execution of: **{plan.get('pipeline_name', 'pipeline')}**\n\n🚀 Processing... please wait...\n_(Using {plan.get('_generator', 'AI')} - {plan.get('_model', 'model')})_"
+            yield format_chat_history(history, message, initial_message)
 
             steps_completed = []
             final_payload = None
             executor_used = "unknown"
+            progress_messages = []
 
             try:
                 # Execute pipeline with Bedrock → CrewAI fallback
@@ -283,12 +956,9 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
 
                     # Info events (fallback notifications, etc.)
                     if event_type == "info":
-                        info_status = {
-                            "status": "info",
-                            "message": event.get("message"),
-                            "executor": event.get("executor", "unknown")
-                        }
-                        accumulated_response = f"```json\n{json.dumps(info_status, indent=2)}\n```"
+                        info_message = f"ℹ️ {event.get('message')}\n_(Executor: {event.get('executor', 'unknown')})_"
+                        progress_messages.append(info_message)
+                        accumulated_response = initial_message + "\n\n" + "\n".join(progress_messages)
                         yield format_chat_history(history, message, accumulated_response)
 
                     # Step updates
@@ -311,25 +981,23 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
                         steps_completed.append(step_info)
                         executor_used = event.get("executor", executor_used)
 
-                        # Create more informative status message
-                        status_message = f"🔄 Step {event.get('step', 0)}: {event.get('tool', 'processing')}"
+                        # Create user-friendly progress message
+                        step_num = event.get('step', 0)
+                        tool_name = event.get('tool', 'processing')
+
                         if event.get('status') == 'completed' and 'observation' in event:
-                            obs_preview = str(event.get('observation'))[:100]
-                            status_message += f" ✅\n   Output: {obs_preview}..."
+                            obs_preview = str(event.get('observation'))[:80]
+                            step_message = f"✅ **Step {step_num}:** {tool_name} - Completed!\n   _Preview: {obs_preview}..._"
                         elif event.get('status') == 'executing':
-                            status_message += " ⏳"
+                            step_message = f"⏳ **Step {step_num}:** {tool_name} - Processing..."
+                        else:
+                            step_message = f"🔄 **Step {step_num}:** {tool_name}"
 
-                        progress_status = {
-                            "status": "executing",
-                            "message": status_message,
-                            "pipeline": plan.get("pipeline_name", ""),
-                            "executor": executor_used,
-                            "current_step": step_info,
-                            "steps_completed": steps_completed
-                        }
-                        accumulated_response = f"```json\n{json.dumps(progress_status, indent=2)}\n```"
+                        progress_messages.append(step_message)
+                        accumulated_response = initial_message + "\n\n" + "\n\n".join(progress_messages)
                         yield format_chat_history(history, message, accumulated_response)
 
+
                     # Final result
                     elif event_type == "final":
@@ -338,16 +1006,11 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
 
                     # Error
                     elif event_type == "error":
-                        error_result = {
-                            "status": "failed",
-                            "error": event.get("error"),
-                            "steps_completed": steps_completed,
-                            "executor": event.get("executor", "unknown")
-                        }
-                        final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
+                        error_msg = event.get("error", "Unknown error")
+                        friendly_error = f"❌ **Pipeline Failed**\n\nError: {error_msg}\n\nCompleted {len(steps_completed)} step(s) before failure.\n\nWhat would you like to do next?"
                         session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
-                        session_manager.add_message(session_id, "assistant", final_response)
-                        yield format_chat_history(history, message, final_response)
+                        session_manager.add_message(session_id, "assistant", friendly_error)
+                        yield format_chat_history(history, message, friendly_error)
                         return
 
                 # Process final result
@@ -366,20 +1029,28 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
                         executor=executor_used
                     )
 
-                    # Format final response
-                    final_display = {
-                        "status": "completed",
-                        "executor": executor_used,
-                        "pipeline": plan.get("pipeline_name"),
-                        "result": final_payload,
-                        "summary": {
-                            "total_steps": len(steps_completed),
-                            "completed_successfully": len([s for s in steps_completed if s.get("status") == "completed"])
-                        }
-                    }
-                    final_response = f"```json\n{json.dumps(final_display, indent=2)}\n```"
+                    # Create user-friendly final response
+                    success_count = len([s for s in steps_completed if s.get("status") == "completed"])
+                    friendly_final = f"""🎉 **Pipeline Completed Successfully!**
+
+**Summary:**
+- Pipeline: {plan.get('pipeline_name', 'Document Processing')}
+- Total Steps: {len(steps_completed)}
+- Successful: {success_count}
+- Executor: {executor_used}
+
+✅ All done! What else would you like me to help you with?
+
+<details>
+<summary>🔧 Detailed Results (for developers)</summary>
+
+```json
+{json.dumps({"status": "completed", "executor": executor_used, "pipeline": plan.get("pipeline_name"), "result": final_payload, "steps": steps_completed}, indent=2)}
+```
+</details>"""
+                    final_response = friendly_final
                 else:
-                    final_response = f"```json\n{json.dumps({'status': 'completed', 'steps': steps_completed, 'executor': executor_used}, indent=2)}\n```"
+                    final_response = f"✅ **Pipeline Completed!**\n\nExecuted {len(steps_completed)} steps using {executor_used}.\n\nReady for your next task!"
                     session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
 
                 session_manager.add_message(session_id, "assistant", final_response)
@@ -387,16 +1058,10 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
                 return
 
             except Exception as e:
-                error_result = {
-                    "error": str(e),
-                    "status": "failed",
-                    "message": "Pipeline execution failed",
-                    "steps_completed": steps_completed
-                }
-                final_response = f"```json\n{json.dumps(error_result, indent=2)}\n```"
+                friendly_error = f"❌ **Pipeline Execution Failed**\n\nError: {str(e)}\n\nCompleted {len(steps_completed)} step(s) before failure.\n\n<details>\n<summary>🔧 Error Details</summary>\n\n```\n{str(e)}\n```\n</details>\n\nWould you like to try again with a different approach?"
                 session_manager.update_session(session_id, {"state": ConversationState.INITIAL})
-                session_manager.add_message(session_id, "assistant", final_response)
-                yield format_chat_history(history, message, final_response)
+                session_manager.add_message(session_id, "assistant", friendly_error)
+                yield format_chat_history(history, message, friendly_error)
                 return
 
         # REJECT - Cancel the pipeline
@@ -405,34 +1070,27 @@ def chatbot_response_streaming(message: str, history: List, session_id: str, file_path: str = None):
                 "state": ConversationState.INITIAL,
                 "proposed_pipeline": None
             })
-            response_data = {
-                "status": "rejected",
-                "message": "Pipeline rejected by user",
-                "action": "💬 Please provide a new instruction"
-            }
-            response = f"```json\n{json.dumps(response_data, indent=2)}\n```"
-            session_manager.add_message(session_id, "assistant", response)
-            yield format_chat_history(history, message, response)
+            friendly_rejection = "👍 No problem! Pipeline cancelled.\n\nWhat else would you like me to help you with?"
+            session_manager.add_message(session_id, "assistant", friendly_rejection)
+            yield format_chat_history(history, message, friendly_rejection)
             return
 
         # EDIT - Request modifications
         elif "edit" in user_input or "modify" in user_input:
             current_pipeline = session.get("proposed_pipeline", {})
-            edit_help = {
-                "status": "edit_mode",
-                "message": "To modify the plan, describe your changes",
-                "current_plan": current_pipeline,
-                "examples": [
-                    "Add summarization at the end",
-                    "Remove table extraction",
-                    "Only process pages 1-3",
-                    "Translate to French instead of Spanish"
-                ],
-                "action": "Describe your changes, or say 'approve' to run as-is"
-            }
-            response = f"```json\n{json.dumps(edit_help, indent=2)}\n```"
-            session_manager.add_message(session_id, "assistant", response)
-            yield format_chat_history(history, message, response)
+            friendly_edit_help = f"""📝 **Edit Mode**
+
+Current pipeline: **{current_pipeline.get('pipeline_name', 'Unknown')}**
+
+Describe what you'd like to change. For example:
+- "Add summarization at the end"
+- "Remove table extraction"
+- "Only process pages 1-3"
+- "Translate to French instead of Spanish"
+
+Or type 'approve' to run the current plan as-is."""
+            session_manager.add_message(session_id, "assistant", friendly_edit_help)
+            yield format_chat_history(history, message, friendly_edit_help)
             return
 
         # Try to modify pipeline based on user input
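
---

Note: the diff imports `intent_classifier` from `services/intent_classifier.py`, which is not part of this commit, so only its call sites are visible above. A minimal sketch of the interface the new code appears to assume follows; the class name, keyword list, and reply texts are illustrative guesses inferred from `classify_intent` returning an `intent` label plus a `requires_pipeline` flag, and from `get_friendly_response`:

```python
# Hypothetical stand-in for services/intent_classifier.py (not in this commit).
# Only the two method names and the returned keys are taken from app.py above;
# everything else is an assumption for illustration.

class IntentClassifier:
    # Assumed keyword list; the real classifier may use an LLM instead.
    PIPELINE_KEYWORDS = ("extract", "summarize", "translate", "table", "classify", "detect")

    def classify_intent(self, message: str) -> dict:
        """Return {"intent": ..., "requires_pipeline": bool} as consumed by app.py."""
        text = message.lower().strip()
        if any(keyword in text for keyword in self.PIPELINE_KEYWORDS):
            return {"intent": "pipeline_request", "requires_pipeline": True}
        if text.endswith("?"):
            return {"intent": "question", "requires_pipeline": False}
        if text in ("hi", "hello", "hey", "thanks"):
            return {"intent": "casual_chat", "requires_pipeline": False}
        return {"intent": "unclear", "requires_pipeline": False}

    def get_friendly_response(self, intent: str, message: str) -> str:
        """Plain-text reply for non-pipeline intents."""
        replies = {
            "casual_chat": "👋 Hi! Upload a document and tell me what to do with it.",
            "question": "I orchestrate document pipelines - try 'extract text and summarize'.",
            "unclear": "I didn't catch that. Type 'help' to see what I can do!",
        }
        return replies.get(intent, replies["unclear"])

intent_classifier = IntentClassifier()
```

Any routing layer shaped like this would divert greetings and questions away from pipeline generation, which is the behavioral change this commit introduces.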
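The handler also consumes events from `execute_pipeline_streaming`, defined in `services/pipeline_executor.py` and likewise outside this diff. Judging from the branches in the code above, the event stream looks roughly like the sketch below; only the `type` names and dictionary keys come from the handler, the values are placeholders:

```python
# Hypothetical generator mirroring the event shapes app.py branches on.
from typing import Iterator

def execute_pipeline_streaming_stub(pipeline: dict, **_kwargs) -> Iterator[dict]:
    """Yield info/step/final events shaped like those the chat handler expects."""
    # Optional notification, e.g. when falling back from Bedrock to CrewAI
    yield {"type": "info", "message": "Falling back to CrewAI", "executor": "crewai"}
    for i, step in enumerate(pipeline.get("pipeline_steps", []), start=1):
        yield {"type": "step", "step": i, "tool": step.get("tool"),
               "status": "executing", "executor": "bedrock_langchain"}
        yield {"type": "step", "step": i, "tool": step.get("tool"),
               "status": "completed", "observation": "tool output here",
               "input": step.get("input"), "executor": "bedrock_langchain"}
    yield {"type": "final", "data": {"ok": True}, "executor": "bedrock_langchain"}
    # On failure the handler instead expects an event like:
    # {"type": "error", "error": "<message>", "executor": "<name>"}
```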
             return
 
         # EDIT - Request modifications
         elif "edit" in user_input or "modify" in user_input:
             current_pipeline = session.get("proposed_pipeline", {})
             ...
             return
 
         # Try to modify pipeline based on user input
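
Finally, `session_manager` is used throughout but defined in `services/session_manager.py` (MongoDB-backed, per the module docstring) and not shown here. A minimal in-memory stand-in, assuming only the methods called in app.py and with hypothetical bodies:

```python
# Hypothetical stand-in for services.session_manager.session_manager.
# Method names match the call sites in app.py; storage and bodies are assumed.
import uuid

class InMemorySessionManager:
    def __init__(self):
        self._sessions = {}

    def create_session(self) -> str:
        session_id = str(uuid.uuid4())
        self._sessions[session_id] = {"state": "initial", "messages": []}
        return session_id

    def get_session(self, session_id):
        return self._sessions.get(session_id)

    def update_session(self, session_id, fields: dict) -> None:
        self._sessions.setdefault(session_id, {}).update(fields)

    def add_message(self, session_id, role: str, content: str) -> None:
        self._sessions.setdefault(session_id, {}).setdefault("messages", []).append(
            {"role": role, "content": content}
        )

session_manager = InMemorySessionManager()
```

The real module also exposes `save_pipeline_execution`, `cleanup_old_sessions`, and `close`, which this sketch omits.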