Spaces:
Paused
Paused
# app_gradio.py (Final Version)
import gradio as gr
import math
import os
import json
from datetime import datetime
from dotenv import load_dotenv

# Import your existing agentic logic
from graph import triage_app, planner_app, main_app
from logging_config import get_logger

# --- Setup & Configuration ---
load_dotenv()
log = get_logger(__name__)

# Ensure the working directories exist before any handler touches them.
for _dirname in ("outputs", "uploads", "conversations"):
    os.makedirs(_dirname, exist_ok=True)

# --- Authentication ---
# NOTE(review): credentials are hard-coded in plaintext; acceptable for a demo,
# but move them to environment variables before any real deployment.
USERS = {
    "tester1": "pass123",
    "researcher": "lab456",
    "admin": "admin789",
}
# --- State Management ---
def get_default_state():
    """Build a fresh per-session state dict.

    Keys:
        original_user_message: prompt that kicked off the current task.
        estimate: the planner's plan/cost estimate for that prompt.
        current_conversation_file: path of the saved chat backing this session.
    """
    fresh_state = {}
    fresh_state["original_user_message"] = ""
    fresh_state["estimate"] = {}
    fresh_state["current_conversation_file"] = None
    return fresh_state
# --- Conversation Management ---
def save_conversation(history, state):
    """Persist the current chat history to a timestamped JSON file.

    Args:
        history: chatbot message list (JSON-serializable).
        state: mutable per-session state dict; updated in place with the
            path of the file just written.

    Returns:
        (status message, dropdown update) pair for the Gradio UI.
    """
    if not history:
        return "Nothing to save.", gr.update()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"conv_{timestamp}.json"
    filepath = os.path.join("conversations", filename)
    with open(filepath, 'w', encoding='utf-8') as f:
        json.dump(history, f, indent=2)
    state["current_conversation_file"] = filepath
    log.info(f"Conversation saved to {filepath}")
    # BUG FIX: the status message previously contained the literal text
    # "(unknown)" and never interpolated the actual filename.
    return f"Saved to {filename}", gr.update(choices=get_saved_conversations())
def load_conversation(filepath, state):
    """Load a previously saved chat JSON back into the UI.

    Args:
        filepath: path chosen in the saved-chats dropdown; a falsy value
            means "no selection", which yields an empty history.
        state: mutable per-session state dict; updated in place.

    Returns:
        (history, state) for the chatbot component and session state.
    """
    if not filepath:
        return [], state
    with open(filepath, 'r', encoding='utf-8') as handle:
        restored = json.load(handle)
    state["current_conversation_file"] = filepath
    log.info(f"Conversation loaded from {filepath}")
    return restored, state
def get_saved_conversations():
    """Return the paths of every saved chat (*.json) in the conversations dir."""
    folder = "conversations"
    return [
        os.path.join(folder, name)
        for name in os.listdir(folder)
        if name.endswith(".json")
    ]
def clear_conversation():
    """Reset the UI: empty chat, fresh session state, cleared status panels."""
    empty_history = []
    return (
        empty_history,
        get_default_state(),
        "Conversation cleared.",
        "No artifacts generated yet.",
    )
# --- Artifact Management ---
def handle_file_upload(file):
    """Move an uploaded temp file into the local 'uploads' directory.

    Args:
        file: Gradio file wrapper exposing the temp path as `.name`, or None.

    Returns:
        A short status string for the upload status textbox.
    """
    if file is None:
        return "No file uploaded."
    # Local import keeps the fix self-contained to this handler.
    import shutil
    basename = os.path.basename(file.name)
    destination_path = os.path.join("uploads", basename)
    # BUG FIX: os.rename raises OSError when Gradio's temp dir lives on a
    # different filesystem (common in containers) and on Windows when the
    # destination already exists; shutil.move handles both cases.
    shutil.move(file.name, destination_path)
    log.info(f"File uploaded to {destination_path}")
    return f"Uploaded: {basename}"
def update_artifact_list():
    """Render the contents of the 'outputs' directory as a Markdown list."""
    artifacts = os.listdir("outputs")
    if not artifacts:
        return "No artifacts generated yet."
    lines = ["### Generated Artifacts:"]
    for name in artifacts:
        lines.append(f"- `{name}` (in your 'outputs' folder)")
    # Trailing newline matches the original string-accumulation output.
    return "\n".join(lines) + "\n"
# --- Core Logic Functions ---
def start_estimation(message, history, state):
    """Phase 1 of a request: triage it, then (if needed) plan and estimate cost.

    Generator wired to the Send button / textbox submit. Each yield updates
    (chatbot, state, msg_textbox, approval_box, budget_input, status_display).
    """
    log.info(f"Starting estimation for: '{message}'")
    state["original_user_message"] = message
    history.append({"role": "user", "content": message})
    # Lock the input box while we analyze the request.
    yield history, state, gr.update(value="", interactive=False), gr.update(visible=False), gr.update(), "Analyzing request..."

    # Triage first: simple requests get a direct answer with no planning.
    triage_out = triage_app.invoke({"userInput": message})
    draft = triage_out.get("draftResponse")
    if draft:
        history.append({"role": "assistant", "content": draft})
        yield history, state, gr.update(interactive=True), gr.update(visible=False), gr.update(), "Ready."
        return

    # Otherwise ask the planner for a plan plus a cost estimate.
    plan_out = planner_app.invoke({"userInput": message})
    estimate = plan_out.get('pmPlan', {})
    state["estimate"] = estimate

    if estimate.get("error"):
        error_msg = f"Error during planning: {estimate['error']}"
        history.append({"role": "assistant", "content": error_msg})
        yield history, state, gr.update(interactive=True), gr.update(visible=False), gr.update(value=0.10), error_msg
        return

    # Show the plan and wait for the user's budget approval.
    plan_text = "\n".join(f"- {step}" for step in estimate.get('plan', []))
    approval_text_md = (
        f"**Here is my plan:**\n{plan_text}\n\n"
        f"This may require up to **{estimate.get('max_loops_initial', 0) + 1} attempts** "
        f"and could cost approximately **${estimate.get('estimated_cost_usd', 0.0)}**."
    )
    yield history, state, gr.update(interactive=False), gr.update(visible=True, value=approval_text_md), gr.update(value=estimate.get('estimated_cost_usd', 0.10)), "Awaiting your approval to proceed."
def execute_main_task(history, state, budget):
    """Phase 2: run the approved plan through the main agent graph.

    Generator wired to the Approve button. Converts the user's dollar budget
    into a retry-loop allowance, streams progress from main_app, and yields
    updates for (chatbot, state, approval_box, msg_textbox, status_display).
    """
    log.info(f"Executing main task with budget: ${budget}")
    yield history, state, gr.update(visible=False), gr.update(value="", interactive=False), "Task approved. Starting execution..."

    # Placeholder assistant message; overwritten once a draft response arrives.
    history.append({"role": "assistant", "content": "..."})

    # Budget -> loop allowance: the first run is always granted, each retry
    # costs one more loop's worth of spend.
    cost_per_loop = state["estimate"].get('cost_per_loop_usd', 0.05)
    if cost_per_loop > 0:
        affordable_runs = max(1, math.floor(float(budget) / cost_per_loop))
        max_loops_calibrated = affordable_runs - 1
    else:
        max_loops_calibrated = 0

    initial_state = {
        "userInput": state["original_user_message"],
        "chatHistory": [],
        "max_loops": max_loops_calibrated,
        "status_update": "Task approved. Starting execution...",
    }

    final_response = "An error occurred during execution."
    for step in main_app.stream(initial_state):
        # Each streamed step maps one node name to that node's output dict.
        node_name = next(iter(step))
        node_output = step[node_name]
        yield history, state, gr.update(visible=False), gr.update(interactive=False), node_output.get("status_update", "Processing...")
        if node_output.get("draftResponse"):
            final_response = node_output["draftResponse"]

    history[-1] = {"role": "assistant", "content": final_response}
    yield history, state, gr.update(visible=False), gr.update(interactive=True), "Task complete. Ready."
def cancel_task(history, state):
    """Abort before execution: note the cancellation and re-enable input."""
    cancellation_note = {"role": "assistant", "content": "Task cancelled."}
    history.append(cancellation_note)
    return (
        history,
        state,
        gr.update(visible=False),
        gr.update(interactive=True),
        "Task cancelled. Ready.",
    )
# --- Gradio UI Definition ---
with gr.Blocks(theme=gr.themes.Soft(), title="Autonomous AI Lab") as demo:
    # Per-browser-session state; each visitor gets an independent copy.
    state = gr.State(value=get_default_state())
    gr.Markdown("# Autonomous AI Lab")
    with gr.Row():
        # Left sidebar: conversation and artifact management.
        with gr.Column(scale=1):
            gr.Markdown("### Controls")
            clear_btn = gr.Button("🗑️ New Conversation")
            gr.Markdown("### Saved Chats")
            saved_chats_dropdown = gr.Dropdown(label="Load a past conversation", choices=get_saved_conversations(), interactive=True)
            save_chat_btn = gr.Button("💾 Save Current Chat")
            save_status_text = gr.Textbox(label="Save Status", interactive=False)
            gr.Markdown("### Artifacts")
            file_uploader = gr.File(label="Upload a file")
            upload_status_text = gr.Textbox(label="Upload Status", interactive=False)
            refresh_artifacts_btn = gr.Button("🔄 Refresh Artifacts List")
            artifact_list_display = gr.Markdown("No artifacts generated yet.")
        # Main panel: chat window, status line, approval workflow, input row.
        with gr.Column(scale=4):
            # NOTE(review): handlers append {"role": ..., "content": ...}
            # dicts; newer Gradio versions expect type="messages" on Chatbot
            # for that format — confirm against the pinned gradio version.
            chatbot = gr.Chatbot(label="Conversation", height=600, avatar_images=(None, "https://i.imgur.com/b5OqI32.png"))
            status_display = gr.Markdown("Status: Ready.")
            # Hidden until the planner produces an estimate to approve.
            with gr.Group(visible=False) as approval_box:
                approval_text = gr.Markdown()
                with gr.Row():
                    budget_input = gr.Number(label="Set your maximum budget ($)", value=0.10, minimum=0.01, step=0.05)
                with gr.Row():
                    proceed_btn = gr.Button("✅ Approve & Proceed", variant="primary")
                    cancel_btn = gr.Button("❌ Cancel")
            with gr.Row():
                msg_textbox = gr.Textbox(label="Your Message", placeholder="Ask a question or describe a task...", scale=7)
                submit_btn = gr.Button("Send", variant="primary", scale=1)

    # --- Event Handlers ---
    # Each output list must match the corresponding handler's yield/return
    # tuple order exactly.
    estimation_outputs = [chatbot, state, msg_textbox, approval_box, budget_input, status_display]
    execution_outputs = [chatbot, state, approval_box, msg_textbox, status_display]
    cancel_outputs = [chatbot, state, approval_box, msg_textbox, status_display]
    clear_outputs = [chatbot, state, save_status_text, artifact_list_display]

    msg_textbox.submit(fn=start_estimation, inputs=[msg_textbox, chatbot, state], outputs=estimation_outputs)
    submit_btn.click(fn=start_estimation, inputs=[msg_textbox, chatbot, state], outputs=estimation_outputs)
    proceed_btn.click(fn=execute_main_task, inputs=[chatbot, state, budget_input], outputs=execution_outputs)
    cancel_btn.click(fn=cancel_task, inputs=[chatbot, state], outputs=cancel_outputs)
    clear_btn.click(fn=clear_conversation, inputs=[], outputs=clear_outputs)
    save_chat_btn.click(fn=save_conversation, inputs=[chatbot, state], outputs=[save_status_text, saved_chats_dropdown])
    saved_chats_dropdown.change(fn=load_conversation, inputs=[saved_chats_dropdown, state], outputs=[chatbot, state])
    file_uploader.upload(fn=handle_file_upload, inputs=[file_uploader], outputs=[upload_status_text])
    refresh_artifacts_btn.click(fn=update_artifact_list, inputs=[], outputs=[artifact_list_display])
# --- Launch Configuration ---
if __name__ == "__main__":
    demo.launch(
        share=True,
        server_name="0.0.0.0",  # listen on all interfaces
        # list(USERS.items()) yields the same (user, password) tuples as the
        # original comprehension.
        auth=list(USERS.items()),
        auth_message="Enter your credentials to access the Autonomous AI Lab.",
    )