Spaces:
Sleeping
Sleeping
"""
hint_node.py — Generates an adaptive, escalating hint for the user.

Improvements over v1:
- Uses llm.with_structured_output() for guaranteed schema compliance
- 4-level escalation system (Conceptual → Approach → Pseudocode → Code)
- Personalizes based on UserProfile weak topics
- Injects misconception library for topic-targeted hints
- Increments turn_count for loop control
"""
| from agent.models import AgentState, HintOutput | |
| from agent.llm_factory import get_llm | |
| from agent.prompts import HINT_PROMPT | |
| from agent.knowledge import get_misconceptions | |
| from agent.memory import load_profile, top_weak_topics | |
# Module-level singletons: one shared LLM client for every call to this node.
_llm = get_llm()
# Wrap the client so responses are parsed straight into HintOutput via
# function calling — schema compliance is enforced instead of free-text parsing.
_structured_llm = _llm.with_structured_output(HintOutput, method="function_calling")
# Top rung of the escalation ladder (Conceptual -> Approach -> Pseudocode -> Code).
_MAX_HINT_LEVEL = 4
def _effective_strictness(strictness: str, topic: str, session_id: str) -> tuple[str, str]:
    """Relax strictness one level when `topic` is among the user's weak topics.

    Returns:
        (possibly-relaxed strictness, human-readable weak-topics string).

    Best-effort: any failure in profile loading/analysis leaves strictness
    unchanged and reports "none yet".
    """
    try:
        profile = load_profile(session_id)
        weak = top_weak_topics(profile, n=3)
        # Normalize BOTH sides of the comparison. The original compared a
        # lowercased/stripped topic against raw profile entries, so a profile
        # topic like "Recursion" could silently never match and the
        # personalization branch would never fire.
        weak_normalized = {w.strip().lower() for w in weak}
        if topic.strip().lower() in weak_normalized:
            # Demote one level when the user consistently struggles here.
            if strictness == "Strict":
                strictness = "Moderate"
            elif strictness == "Moderate":
                strictness = "Lenient"
        return strictness, (", ".join(weak) if weak else "none yet")
    except Exception:
        return strictness, "none yet"


def _invoke_hint_llm(
    strictness: str,
    topic: str,
    gap: str,
    hint_level: int,
    weak_topics_str: str,
    misconceptions: str,
) -> tuple[str, str]:
    """Invoke the structured LLM; return (hint_text, hint_type).

    Falls back to an error payload rather than raising, so the graph keeps
    running even when structured-output parsing fails.
    """
    try:
        result: HintOutput = _structured_llm.invoke(
            HINT_PROMPT.format_messages(
                strictness=strictness,
                topic=topic,
                gap=gap,
                hint_level=hint_level,
                weak_topics=weak_topics_str,
                misconceptions=misconceptions,
            )
        )
        return result.hint, result.type
    except Exception as e:
        print(f"[hint_node] Structured output error: {e}")
        return f"LLM Parsing Error: {str(e)}", "Error"


def generate_hint(state: AgentState) -> dict:
    """
    Generates a hint based on the gap, strictness, hint level, and user profile.
    Increments current_hint_level and turn_count.
    Populates explain-why-wrong fields in final_response if present.
    """
    gap = state["identified_gap"]
    topic = state.get("problem_topic", "Unknown")
    # Clamp the incoming level so a corrupted/over-incremented state value
    # can never exceed the escalation ceiling.
    hint_level = min(state.get("current_hint_level", 1), _MAX_HINT_LEVEL)
    session_id = state.get("session_id", "anonymous")

    # ── Determine effective strictness (personalize for weak topics) ─────────
    strictness, weak_topics_str = _effective_strictness(
        state["strictness"], topic, session_id
    )

    # Misconception lookup is best-effort: every other external call in this
    # node is defensively wrapped, but this one previously was not — a
    # knowledge-base failure would have crashed the whole node.
    try:
        misconceptions = "; ".join(get_misconceptions(topic))
    except Exception:
        misconceptions = ""

    # ── Build prompt and invoke structured LLM ───────────────────────────────
    hint_text, hint_type = _invoke_hint_llm(
        strictness, topic, gap, hint_level, weak_topics_str, misconceptions
    )

    # ── Score: decreases linearly with gap magnitude, floored at 0 ───────────
    score = max(0, 100 - (state["gap_magnitude"] * 10))

    # ── Build final_response — include explain-why-wrong when available ──────
    final_response: dict = {
        "hint": hint_text,
        "type": hint_type,
        "score": score,
        "hint_level": hint_level,
    }
    if state.get("mistake"):
        final_response["mistake"] = state["mistake"]
        final_response["why_wrong"] = state.get("why_wrong")
        final_response["correct_thinking"] = state.get("correct_thinking")

    return {
        "final_response": final_response,
        # Advance the escalation ladder for the next turn, capped at the max.
        "current_hint_level": min(hint_level + 1, _MAX_HINT_LEVEL),
        "turn_count": state.get("turn_count", 0) + 1,
    }