File size: 3,162 Bytes
d19e6fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2c0f32b
d19e6fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import os
from typing import List, Optional, TypedDict
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langgraph.graph import StateGraph, END
from pydantic import BaseModel, Field

# Define the output structure for the LLM
class QuestionOutput(BaseModel):
    """Schema the LLM must follow when emitting a generated QCM question.

    Used as the ``pydantic_object`` of a ``JsonOutputParser`` so the model's
    JSON reply is validated against these three fields.
    """

    question_text: str = Field(
        description="The text of the question"
    )
    options: List[str] = Field(
        description="A list of options including distractors and the correct answer"
    )
    correct_option_index: int = Field(
        description="The index of the correct option in the options list"
    )

# Define the state of the graph
# State flowing through the LangGraph workflow: the QCM description and the
# questions generated so far are inputs; "generated_question" is the output
# slot filled in by the generation node (functional TypedDict form).
AgentState = TypedDict(
    "AgentState",
    {
        "qcm_description": str,
        "existing_questions": List[str],
        "document_content": Optional[str],
        "generated_question": Optional[dict],
    },
)

# Initialize the LLM (Google Gemini via langchain_google_genai).
# Requires GOOGLE_API_KEY to be set in the environment; on any failure we
# fall back to llm = None so graph nodes can degrade gracefully.
try:
    llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite", temperature=0.7)
except Exception as e:
    # Best-effort init: downstream nodes check `llm` for None before use.
    print(f"Warning: Failed to initialize ChatGoogleGenerativeAI: {e}")
    llm = None

def generate_question_node(state: AgentState):
    """Generate one new, unique QCM question for the graph state.

    Builds a prompt from the QCM description, the optional source document,
    and the already-existing questions (to avoid duplicates), then asks the
    LLM for a JSON object matching ``QuestionOutput``.

    Args:
        state: Graph state with ``qcm_description``, ``existing_questions``,
            and optionally ``document_content``.

    Returns:
        A partial state update ``{"generated_question": result}`` where
        ``result`` is the parsed question dict, or ``None`` when the LLM is
        unavailable or generation/parsing failed.
    """
    if not llm:
        # LLM failed to initialize at import time; signal "no question".
        return {"generated_question": None}

    description = state["qcm_description"]
    existing = "\n".join([f"- {q}" for q in state["existing_questions"]])

    parser = JsonOutputParser(pydantic_object=QuestionOutput)

    # document_content is Optional[str]: the key may be present with value
    # None, in which case .get(key, "") still returns None. Normalize to ""
    # so both the truthiness check and the invoke payload are consistent.
    document_content = state.get("document_content") or ""

    # Assemble the user prompt; the document section is included only when
    # there is actual content, keeping the prompt compact otherwise.
    prompt_text = "QCM Description: {description}\n\n"
    if document_content:
        prompt_text += "Document Content:\n{document_content}\n\n"

    prompt_text += "Existing Questions:\n{existing}\n\n"
    prompt_text += "Generate a new question:\n{format_instructions}"

    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are an expert teacher assistant helping to create a Multiple Choice Question (QCM). "
                   "Your goal is to generate a NEW, unique question based on the QCM description and the provided document content (if any). "
                   "Avoid duplicating any of the existing questions. "
                   "Provide the output in JSON format with 'question_text', 'options' (list of strings), and 'correct_option_index' (int)."),
        ("user", prompt_text)
    ])

    chain = prompt | llm | parser

    try:
        result = chain.invoke({
            "description": description,
            "existing": existing,
            "document_content": document_content,
            "format_instructions": parser.get_format_instructions()
        })

        return {"generated_question": result}
    except Exception as e:
        # Deliberate best-effort: log and return None rather than crash the
        # graph run on an LLM/parsing error.
        print(f"Error generating question: {e}")
        return {"generated_question": None}

# Build the graph: a single-node LangGraph workflow that runs the question
# generator once per invocation (entry point -> generate_question -> END).
workflow = StateGraph(AgentState)
workflow.add_node("generate_question", generate_question_node)
workflow.set_entry_point("generate_question")
workflow.add_edge("generate_question", END)

# Compiled, runnable graph; callers invoke `app` with an AgentState dict.
app = workflow.compile()