# Multimodal_Math_Mentor / ui / callbacks.py
# Initial commit: Multimodal Math Mentor (author: Amit-kr26, commit 3c25c17)
from __future__ import annotations
import uuid
from typing import Generator
from langchain_core.messages import HumanMessage
from agents.graph import app_graph
from agents.state import MathMentorState
from config import Settings, settings
from memory.store import update_feedback
def _make_config(thread_id: str) -> dict:
return {"configurable": {"thread_id": thread_id}}
def run_pipeline(
    input_text: str,
    input_image: str | None,
    input_audio: str | None,
    input_mode: str,
    thread_id: str,
    chat_history: list,
) -> Generator[dict, None, None]:
    """Run the full agent pipeline, yielding partial state updates for streaming.

    Each yielded dict has the shape ``{"node": <name>, "output": <state delta>}``;
    on failure a single ``{"node": "error", ...}`` event is yielded instead.
    """
    # Resolve which modality the user actually supplied; anything else
    # (including a selected mode with no payload) falls back to plain text.
    if input_mode == "Image" and input_image:
        input_type, raw_input = "image", input_image
    elif input_mode == "Audio" and input_audio:
        input_type, raw_input = "audio", input_audio
    else:
        input_type, raw_input = "text", input_text

    # Bail out early with a UI-friendly error if no LLM endpoint is set up.
    if not settings.is_llm_configured:
        yield {
            "node": "error",
            "output": {"error": "LLM not configured. Set Base URL and Model in Settings or .env file."},
        }
        return

    # Text goes into the chat history verbatim; other modalities get a
    # placeholder tag so the transcript stays serializable.
    turn = raw_input if input_type == "text" else f"[{input_type} input]"
    initial_state: MathMentorState = {
        "input_type": input_type,
        "raw_input": raw_input,
        "needs_human_review": False,
        "human_approved": False,
        "human_edited_text": "",
        "agent_trace": [],
        "chat_history": chat_history + [HumanMessage(content=turn)],
        "solver_retries": 0,
        "retrieved_chunks": [],
        "similar_past_problems": [],
        "solution_steps": [],
    }

    try:
        # Stream incremental node updates so the UI can render progress live.
        for event in app_graph.stream(initial_state, _make_config(thread_id), stream_mode="updates"):
            for node_name, node_output in event.items():
                yield {"node": node_name, "output": node_output}
    except Exception as exc:  # top-level boundary: surface any failure to the UI
        import traceback
        trace = traceback.format_exc()
        print(f"[PIPELINE ERROR] {trace}")
        yield {
            "node": "error",
            "output": {"error": f"{exc}\n\nTraceback:\n{trace}"},
        }
def resume_after_hitl(
    thread_id: str,
    human_text: str = "",
    approved: bool = True,
) -> Generator[dict, None, None]:
    """Resume the graph after HITL interrupt.

    Writes the human's decision into the checkpointed state, then resumes
    streaming from where the graph paused, yielding the same
    ``{"node": ..., "output": ...}`` events as :func:`run_pipeline`.
    """
    config = _make_config(thread_id)
    # Record the reviewer's edits/approval and clear the review flag so the
    # graph can proceed past the interrupt point.
    human_update = {
        "human_edited_text": human_text,
        "human_approved": approved,
        "needs_human_review": False,
    }
    app_graph.update_state(config, human_update)

    try:
        # Passing None as input resumes from the saved checkpoint.
        for event in app_graph.stream(None, config, stream_mode="updates"):
            for node_name, node_output in event.items():
                yield {"node": node_name, "output": node_output}
    except Exception as exc:  # boundary: report resume failures to the UI
        import traceback
        trace = traceback.format_exc()
        print(f"[HITL RESUME ERROR] {trace}")
        yield {
            "node": "error",
            "output": {"error": f"{exc}\n\nTraceback:\n{trace}"},
        }
def submit_feedback(
    thread_id: str,
    feedback: str,
    comment: str = "",
) -> str:
    """Submit user feedback for a solved problem.

    Attaches the feedback to the most recently stored record.

    Args:
        thread_id: Conversation thread identifier. Kept for interface
            compatibility; the lookup is "latest record", not by thread.
        feedback: Feedback label chosen by the user.
        comment: Optional free-text comment.

    Returns:
        A human-readable status message for the UI.
    """
    # Fix: the original built a config from thread_id and never used it;
    # it also called update_feedback with an empty id when the newest
    # record was stored without one. Both are addressed here.
    try:
        from memory.store import get_all_records
        records = get_all_records()
        if records:
            last_id = records[-1].get("id", "")
            if last_id:
                update_feedback(last_id, feedback, comment)
                return f"Feedback recorded: {feedback}"
    except Exception as e:
        return f"Error saving feedback: {e}"
    return "No record found to update."
def update_settings(base_url: str, model: str, api_key: str) -> str:
    """Update LLM settings at runtime. Only overwrite fields the user filled in."""
    # Apply each non-blank field; blank inputs leave the current value alone.
    for attr, value in (
        ("llm_base_url", base_url.strip()),
        ("llm_model", model.strip()),
        ("llm_api_key", api_key.strip()),
    ):
        if value:
            setattr(settings, attr, value)

    if settings.is_llm_configured:
        return f"Settings updated: {settings.llm_model} @ {settings.llm_base_url}"
    return "⚠ LLM not configured. Please set Base URL and Model."
def new_thread_id() -> str:
    """Return a fresh UUID4 string to key a new conversation thread."""
    return f"{uuid.uuid4()}"