from typing import List, Optional, TypedDict, Literal, Dict, Any

from pydantic import BaseModel, Field
from langchain_core.messages import BaseMessage


# ── LLM Structured Output Models ──────────────────────────────────────────────


class EvaluationOutput(BaseModel):
    """Structured output from the reasoning analyst."""

    problem_topic: str
    identified_gap: str
    # Required field; bounds enforced by pydantic validation.
    gap_magnitude: int = Field(
        ge=0,
        le=10,
        description="0-10 scale where 0 is correct/optimal and 10 is completely wrong/missing.",
    )
    reasoning: str

    # "Explain why wrong" fields — optional because an evaluation may find
    # nothing to correct.
    mistake: Optional[str] = Field(
        default=None,
        description="The specific mistake the user made (e.g., 'Used O(N²) nested loop').",
    )
    why_wrong: Optional[str] = Field(
        default=None,
        description="Why this approach fails (e.g., 'This exceeds time limit for N=10^5').",
    )
    correct_thinking: Optional[str] = Field(
        default=None,
        description="The correct direction to think (e.g., 'Consider a hashmap for O(1) lookup').",
    )


class HintOutput(BaseModel):
    """Structured output for the hint generator."""

    hint: str
    type: str = Field(description="e.g., Conceptual, Approach, Data Structure, Code")
    # Defaults to the gentlest level; bounded to the four-step ladder.
    escalation_level: int = Field(
        default=1,
        ge=1,
        le=4,
        description="1=Conceptual, 2=Approach, 3=Pseudocode, 4=Code snippet",
    )


class SolutionOutput(BaseModel):
    """Structured output for the solution revealer."""

    solution_code: str
    explanation: str
    complexity_analysis: str


# ── Agent State ────────────────────────────────────────────────────────────────


class AgentState(TypedDict):
    """The state of the agent's graph — shared memory between all nodes."""

    # ── User Input ──
    problem: str
    user_thought: str
    code: Optional[str]
    strictness: Literal["Strict", "Moderate", "Lenient"]
    request_mode: Literal["analyze", "hint", "solution", "hint_forced"]
    session_id: str  # Identifies the user for persistent memory

    # ── Internal Processing ──
    problem_topic: Optional[str]
    identified_gap: Optional[str]
    gap_magnitude: int
    current_hint_level: int  # 1=Conceptual, 2=Approach, 3=Pseudocode, 4=Code
    turn_count: int  # Loop protection counter

    # ── Code Evaluation ──
    test_pass_rate: Optional[float]  # 0.0–1.0 from sandbox runner

    # ── Explain-Why-Wrong ──
    mistake: Optional[str]
    why_wrong: Optional[str]
    correct_thinking: Optional[str]

    # ── Output to User ──
    messages: List[BaseMessage]
    final_response: Optional[Dict[str, Any]]


# ── User Memory Profile ────────────────────────────────────────────────────────


class UserProfile(BaseModel):
    """Persistent learning profile for a session."""

    session_id: str
    # default_factory avoids the shared-mutable-default pitfall.
    weak_topics: Dict[str, int] = Field(
        default_factory=dict,
        description="Maps DSA topic → cumulative weakness score.",
    )
    solved_problems: int = 0
    total_turns: int = 0
    avg_gap: float = 0.0