Commit 2639a1d
Parent(s): 08ba182

Configure app for Hugging Face deployment

Files changed:
- app_gradio.py +2 -30
- graph.py +3 -3
app_gradio.py
CHANGED
@@ -1,4 +1,4 @@
-# app_gradio.py (Final Version
+# app_gradio.py (Final Version)
 
 import gradio as gr
 import math
@@ -38,7 +38,6 @@ def get_default_state():
 
 # --- Conversation Management ---
 def save_conversation(history, state):
-    """Saves the current chat history to a timestamped JSON file."""
     if not history:
         return "Nothing to save.", gr.update()
 
@@ -54,7 +53,6 @@ def save_conversation(history, state):
     return f"Saved to {filename}", gr.update(choices=get_saved_conversations())
 
 def load_conversation(filepath, state):
-    """Loads a chat history from a JSON file."""
     if not filepath:
         return [], state
 
@@ -66,30 +64,23 @@ def load_conversation(filepath, state):
     return history, state
 
 def get_saved_conversations():
-    """Returns a list of saved conversation file paths."""
     return [os.path.join("conversations", f) for f in os.listdir("conversations") if f.endswith(".json")]
 
 def clear_conversation():
-    """Resets the chat to a new, empty state."""
     return [], get_default_state(), "Conversation cleared.", "No artifacts generated yet."
 
 # --- Artifact Management ---
 def handle_file_upload(file):
-    """Saves an uploaded file to the /uploads directory."""
     if file is None:
         return "No file uploaded."
 
     basename = os.path.basename(file.name)
     destination_path = os.path.join("uploads", basename)
-
-    # Gradio's File component creates a temp file, so we move it
     os.rename(file.name, destination_path)
-
     log.info(f"File uploaded to {destination_path}")
     return f"Uploaded: {basename}"
 
 def update_artifact_list():
-    """Scans the outputs directory and returns a list of files."""
     output_files = os.listdir("outputs")
     if not output_files:
         return "No artifacts generated yet."
@@ -97,7 +88,6 @@ def update_artifact_list():
     markdown_list = "### Generated Artifacts:\n"
     for f in output_files:
         markdown_list += f"- `{f}` (in your 'outputs' folder)\n"
-
     return markdown_list
 
 # --- Core Logic Functions ---
@@ -106,7 +96,6 @@ def start_estimation(message, history, state):
     state["original_user_message"] = message
     history.append({"role": "user", "content": message})
 
-    # Immediately show user message and disable input
     yield history, state, gr.update(value="", interactive=False), gr.update(visible=False), gr.update(), "Analyzing request..."
 
     triage_inputs = {"userInput": message}
@@ -134,10 +123,8 @@ def start_estimation(message, history, state):
 def execute_main_task(history, state, budget):
     log.info(f"Executing main task with budget: ${budget}")
 
-    # Hide approval box and show initial status
     yield history, state, gr.update(visible=False), gr.update(value="", interactive=False), "Task approved. Starting execution..."
 
-    # Add a placeholder for the assistant's response
     history.append({"role": "assistant", "content": "..."})
 
     cost_per_loop = state["estimate"].get('cost_per_loop_usd', 0.05)
@@ -155,27 +142,19 @@ def execute_main_task(history, state, budget):
     }
 
     final_response = "An error occurred during execution."
-    # Use .stream() to get live updates
     for step in main_app.stream(initial_state):
-        # The key will be the name of the node that just ran
         node_name = list(step.keys())[0]
         node_output = step[node_name]
 
-        # Update the status display with the latest message
         status = node_output.get("status_update", "Processing...")
         yield history, state, gr.update(visible=False), gr.update(interactive=False), status
 
-        # Keep track of the final response
         if "draftResponse" in node_output and node_output["draftResponse"]:
             final_response = node_output["draftResponse"]
 
-    # Update the final message in the chat history
     history[-1] = {"role": "assistant", "content": final_response}
-
-    # Final yield to re-enable input and hide status
     yield history, state, gr.update(visible=False), gr.update(interactive=True), "Task complete. Ready."
 
-
 def cancel_task(history, state):
     history.append({"role": "assistant", "content": "Task cancelled."})
     return history, state, gr.update(visible=False), gr.update(interactive=True), "Task cancelled. Ready."
@@ -190,25 +169,19 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Autonomous AI Lab") as demo:
     with gr.Column(scale=1):
         gr.Markdown("### Controls")
         clear_btn = gr.Button("🗑️ New Conversation")
-
         gr.Markdown("### Saved Chats")
         saved_chats_dropdown = gr.Dropdown(label="Load a past conversation", choices=get_saved_conversations(), interactive=True)
         save_chat_btn = gr.Button("💾 Save Current Chat")
         save_status_text = gr.Textbox(label="Save Status", interactive=False)
-
         gr.Markdown("### Artifacts")
         file_uploader = gr.File(label="Upload a file")
        upload_status_text = gr.Textbox(label="Upload Status", interactive=False)
-
         refresh_artifacts_btn = gr.Button("🔄 Refresh Artifacts List")
         artifact_list_display = gr.Markdown("No artifacts generated yet.")
 
     with gr.Column(scale=4):
         chatbot = gr.Chatbot(label="Conversation", height=600, avatar_images=(None, "https://i.imgur.com/b5OqI32.png"))
-
-        # NEW: Status display
-        status_display = gr.Markdown("Status: Ready.", elem_classes="status-display")
-
+        status_display = gr.Markdown("Status: Ready.")
         with gr.Group(visible=False) as approval_box:
             approval_text = gr.Markdown()
             with gr.Row():
@@ -222,7 +195,6 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Autonomous AI Lab") as demo:
         submit_btn = gr.Button("Send", variant="primary", scale=1)
 
     # --- Event Handlers ---
-    # Define all outputs for streaming functions
     estimation_outputs = [chatbot, state, msg_textbox, approval_box, budget_input, status_display]
     execution_outputs = [chatbot, state, approval_box, msg_textbox, status_display]
     cancel_outputs = [chatbot, state, approval_box, msg_textbox, status_display]
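Note for readers of this diff: start_estimation and execute_main_task are Gradio generator handlers, where each yield pushes one tuple of updates to the components listed in outputs. A minimal self-contained sketch of that pattern follows; the handler name slow_task and its components are hypothetical stand-ins, not the Space's actual wiring.

import time
import gradio as gr

def slow_task(message, history):
    # Toy generator handler: each yield updates (chatbot, status) in order.
    history = history + [{"role": "user", "content": message}]
    yield history, "Working..."   # first update: echo the user message
    time.sleep(1)                 # stand-in for the real long-running work
    history = history + [{"role": "assistant", "content": f"Done: {message}"}]
    yield history, "Ready."       # final update: answer plus idle status

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # dict-style messages, as in the app
    status = gr.Markdown("Ready.")
    box = gr.Textbox()
    # Because slow_task is a generator, Gradio streams every yield to the outputs.
    box.submit(slow_task, inputs=[box, chatbot], outputs=[chatbot, status])

demo.launch()

The sketch yields a fresh history list each time for clarity; the real app mutates history in place and re-yields it, which Gradio accepts equally.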
graph.py
CHANGED
@@ -1,11 +1,11 @@
-# graph.py (Final)
+# graph.py (Final Version)
 
 import json
 import re
 import math
 from typing import TypedDict, List, Dict, Optional
 from langchain_openai import ChatOpenAI
-from langgraph.graph import StateGraph, END
+from langgraph.graph import StateGraph, END
 from memory_manager import memory_manager
 from code_executor import execute_python_code
 from logging_config import setup_logging, get_logger
@@ -52,7 +52,7 @@ def parse_json_from_llm(llm_output: str) -> Optional[dict]:
 # --- LLM Initialization ---
 llm = ChatOpenAI(model="gpt-4o", temperature=0.1, max_retries=3, request_timeout=60)
 
-# --- Agent Node Functions
+# --- Agent Node Functions ---
 def run_triage_agent(state: AgentState):
     log.info("--- triage ---")
     prompt = f"Analyze the user input. Is it a simple conversational greeting or a task? Respond with 'greeting' or 'task'.\n\nUser Input: \"{state['userInput']}\""
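The graph.py hunks only touch comments and the langgraph import, but together with main_app.stream(initial_state) in app_gradio.py they imply the usual LangGraph shape: an AgentState TypedDict flowing through a compiled StateGraph. A minimal sketch under those assumptions (a single hypothetical node with a stand-in body; the Space's real graph has more nodes, LLM calls, and routing):

from typing import TypedDict
from langgraph.graph import StateGraph, END

class AgentState(TypedDict):
    userInput: str
    status_update: str

def run_triage_agent(state: AgentState):
    # Stand-in for the LLM call in the diff: classify greeting vs. task.
    text = state["userInput"].strip().lower()
    label = "greeting" if text in ("hi", "hello", "hey") else "task"
    return {"status_update": f"Triage result: {label}"}

graph = StateGraph(AgentState)
graph.add_node("triage", run_triage_agent)
graph.set_entry_point("triage")
graph.add_edge("triage", END)
app = graph.compile()

# With the default stream mode, .stream() yields one {node_name: node_update}
# mapping per executed node, which is why the UI loop in app_gradio.py can
# take list(step.keys())[0] as the name of the node that just ran.
for step in app.stream({"userInput": "hello", "status_update": ""}):
    node_name = list(step.keys())[0]
    print(node_name, step[node_name])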