# NOTE(review): page-scrape residue from the Hugging Face Spaces UI
# ("Spaces: Sleeping Sleeping") — not part of the program. Kept as a
# comment so the file remains valid Python; safe to delete.
import os
from typing import List, Optional, TypedDict

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.graph import StateGraph, END
from pydantic import BaseModel, Field
# Define the output structure for the LLM
class QuestionOutput(BaseModel):
    """Schema the LLM must produce for one multiple-choice question.

    Used as the ``pydantic_object`` of a ``JsonOutputParser`` so the model's
    JSON reply is validated against these three fields.
    """

    # The question prompt shown to the student.
    question_text: str = Field(description="The text of the question")
    # All answer choices, distractors and the correct answer mixed together.
    options: List[str] = Field(description="A list of options including distractors and the correct answer")
    # 0-based position of the correct answer inside ``options``.
    correct_option_index: int = Field(description="The index of the correct option in the options list")
# Define the state of the graph
class AgentState(TypedDict):
    """State carried through the LangGraph workflow.

    The single node reads the first three keys and writes
    ``generated_question``.
    """

    # Teacher-provided description of the QCM (quiz) being built.
    qcm_description: str
    # Questions already in the quiz; used to avoid duplicates.
    existing_questions: List[str]
    # Optional supporting document text to ground the new question.
    document_content: Optional[str]
    # Output slot: parsed QuestionOutput-shaped dict, or None on failure.
    generated_question: Optional[dict]
# Initialize the LLM.
# Ensure GOOGLE_API_KEY is set in the environment (picked up implicitly by
# langchain_google_genai); the old comment mentioning OPENAI_API_KEY was a
# leftover from a previous OpenAI-based version.
try:
    llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite", temperature=0.7)
except Exception as e:
    # Best-effort init: downstream nodes check `llm` for None instead of
    # crashing at import time (e.g. when the API key is missing).
    print(f"Warning: Failed to initialize ChatGoogleGenerativeAI: {e}")
    llm = None
def generate_question_node(state: AgentState):
    """Generate one new, unique multiple-choice question.

    Builds a prompt from the QCM description, the optional document content,
    and the list of existing questions (to avoid duplicates), then asks the
    LLM for a JSON object matching ``QuestionOutput``.

    Args:
        state: Current graph state; reads ``qcm_description``,
            ``existing_questions`` and ``document_content``.

    Returns:
        dict: Partial state update ``{"generated_question": result}`` where
        ``result`` is the parsed question dict, or ``None`` when the LLM is
        unavailable or generation/parsing fails.
    """
    if not llm:
        # LLM failed to initialize at import time — degrade gracefully.
        return {"generated_question": None}

    description = state["qcm_description"]
    existing = "\n".join([f"- {q}" for q in state["existing_questions"]])
    parser = JsonOutputParser(pydantic_object=QuestionOutput)

    # ``document_content`` is Optional[str]: the key may be present with value
    # None, in which case .get()'s default is NOT used — coerce to "" so the
    # conditional below and the invoke() payload stay consistent.
    document_content = state.get("document_content", "") or ""

    # Assemble the user prompt; the document section is only included when
    # there actually is document text.
    prompt_text = "QCM Description: {description}\n\n"
    if document_content:
        prompt_text += "Document Content:\n{document_content}\n\n"
    prompt_text += "Existing Questions:\n{existing}\n\n"
    prompt_text += "Generate a new question:\n{format_instructions}"

    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are an expert teacher assistant helping to create a Multiple Choice Question (QCM). "
                   "Your goal is to generate a NEW, unique question based on the QCM description and the provided document content (if any). "
                   "Avoid duplicating any of the existing questions. "
                   "Provide the output in JSON format with 'question_text', 'options' (list of strings), and 'correct_option_index' (int)."),
        ("user", prompt_text)
    ])

    chain = prompt | llm | parser
    try:
        result = chain.invoke({
            "description": description,
            "existing": existing,
            "document_content": document_content,
            "format_instructions": parser.get_format_instructions()
        })
        return {"generated_question": result}
    except Exception as e:
        # Deliberate best-effort boundary: any LLM/parsing failure yields
        # None so the graph completes instead of raising.
        print(f"Error generating question: {e}")
        return {"generated_question": None}
# Build the graph: a single-node linear workflow —
# entry -> generate_question -> END.
workflow = StateGraph(AgentState)
workflow.add_node("generate_question", generate_question_node)
workflow.set_entry_point("generate_question")
workflow.add_edge("generate_question", END)
# Compiled runnable; callers invoke it with an AgentState-shaped dict.
app = workflow.compile()