from __future__ import annotations
import uuid
from typing import Generator
from langchain_core.messages import HumanMessage
from agents.graph import app_graph
from agents.state import MathMentorState
from config import Settings, settings
from memory.store import update_feedback
def _make_config(thread_id: str) -> dict:
return {"configurable": {"thread_id": thread_id}}
def run_pipeline(
    input_text: str,
    input_image: str | None,
    input_audio: str | None,
    input_mode: str,
    thread_id: str,
    chat_history: list,
) -> Generator[dict, None, None]:
    """Run the full agent pipeline, yielding partial state updates for streaming.

    Yields dicts of the form ``{"node": <node name>, "output": <node output>}``;
    on failure a single ``{"node": "error", ...}`` item is emitted instead.
    """
    # Resolve which payload to feed the graph; anything that isn't a valid
    # image/audio selection falls back to plain text input.
    if input_mode == "Image" and input_image:
        kind, payload = "image", input_image
    elif input_mode == "Audio" and input_audio:
        kind, payload = "audio", input_audio
    else:
        kind, payload = "text", input_text

    # Guard: without a configured LLM there is nothing to run.
    if not settings.is_llm_configured:
        yield {
            "node": "error",
            "output": {"error": "LLM not configured. Set Base URL and Model in Settings or .env file."},
        }
        return

    # Non-text inputs are recorded in chat history as a placeholder marker.
    user_message = HumanMessage(
        content=payload if kind == "text" else f"[{kind} input]"
    )
    seed_state: MathMentorState = {
        "input_type": kind,
        "raw_input": payload,
        "needs_human_review": False,
        "human_approved": False,
        "human_edited_text": "",
        "agent_trace": [],
        "chat_history": chat_history + [user_message],
        "solver_retries": 0,
        "retrieved_chunks": [],
        "similar_past_problems": [],
        "solution_steps": [],
    }

    try:
        # Each streamed event maps node name -> that node's state delta.
        for event in app_graph.stream(seed_state, _make_config(thread_id), stream_mode="updates"):
            for node_name, node_output in event.items():
                yield {"node": node_name, "output": node_output}
    except Exception as exc:  # boundary catch: surface failures to the UI
        import traceback

        tb = traceback.format_exc()
        print(f"[PIPELINE ERROR] {tb}")
        yield {"node": "error", "output": {"error": f"{exc}\n\nTraceback:\n{tb}"}}
def resume_after_hitl(
    thread_id: str,
    human_text: str = "",
    approved: bool = True,
) -> Generator[dict, None, None]:
    """Resume the graph after HITL interrupt.

    Writes the human's decision into the checkpointed state, then continues
    streaming updates (``{"node": ..., "output": ...}``) from where the graph
    paused.
    """
    runnable_config = _make_config(thread_id)

    # Record the reviewer's verdict and clear the pending-review flag so the
    # graph can proceed past the interrupt point.
    app_graph.update_state(
        runnable_config,
        {
            "human_edited_text": human_text,
            "human_approved": approved,
            "needs_human_review": False,
        },
    )

    try:
        # Passing None as input resumes from the stored checkpoint.
        for event in app_graph.stream(None, runnable_config, stream_mode="updates"):
            for node_name, node_output in event.items():
                yield {"node": node_name, "output": node_output}
    except Exception as exc:  # boundary catch: surface failures to the UI
        import traceback

        tb = traceback.format_exc()
        print(f"[HITL RESUME ERROR] {tb}")
        yield {"node": "error", "output": {"error": f"{exc}\n\nTraceback:\n{tb}"}}
def submit_feedback(
    thread_id: str,
    feedback: str,
    comment: str = "",
) -> str:
    """Submit user feedback for a solved problem.

    Attaches *feedback* (and an optional *comment*) to the most recently
    stored record and returns a human-readable status string.

    Note: *thread_id* is currently unused — feedback is attached to the
    latest record overall, not the record for this thread. The parameter is
    kept for interface compatibility with callers.
    """
    # Fix: the original computed `config = _make_config(thread_id)` and never
    # used it; the dead local has been removed.
    try:
        # Imported lazily to avoid loading the store at module import time.
        from memory.store import get_all_records

        records = get_all_records()
        if records:
            last_id = records[-1].get("id", "")
            update_feedback(last_id, feedback, comment)
            return f"Feedback recorded: {feedback}"
    except Exception as e:
        # Best-effort persistence: report the failure instead of raising.
        return f"Error saving feedback: {e}"
    return "No record found to update."
def update_settings(base_url: str, model: str, api_key: str) -> str:
    """Update LLM settings at runtime. Only overwrite fields the user filled in.

    Blank (or whitespace-only) fields are ignored so existing values survive.
    Returns a status string suitable for display in the UI.
    """
    # Apply each non-empty value onto the live settings object.
    for attr, value in (
        ("llm_base_url", base_url.strip()),
        ("llm_model", model.strip()),
        ("llm_api_key", api_key.strip()),
    ):
        if value:
            setattr(settings, attr, value)

    if not settings.is_llm_configured:
        return "⚠ LLM not configured. Please set Base URL and Model."
    return f"Settings updated: {settings.llm_model} @ {settings.llm_base_url}"
def new_thread_id() -> str:
    """Generate a fresh UUID4 string for identifying a conversation thread."""
    return f"{uuid.uuid4()}"