dav74 committed on
Commit
d19e6fb
·
verified ·
1 Parent(s): 9b47c40

Upload 4 files

Browse files
Files changed (4) hide show
  1. agent/graph.py +82 -0
  2. main.py +19 -0
  3. requirements.txt +9 -0
  4. routers/agent.py +70 -0
agent/graph.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List, Optional, TypedDict
3
+ from langchain_google_genai import ChatGoogleGenerativeAI
4
+ from langchain_core.prompts import ChatPromptTemplate
5
+ from langchain_core.output_parsers import JsonOutputParser
6
+ from langgraph.graph import StateGraph, END
7
+ from pydantic import BaseModel, Field
8
+
9
# Define the output structure for the LLM
class QuestionOutput(BaseModel):
    """Structured schema the LLM must emit for a single multiple-choice question.

    The Field descriptions are surfaced to the model via
    JsonOutputParser.get_format_instructions(), so they are part of the
    runtime prompt, not just documentation.
    """
    question_text: str = Field(description="The text of the question")
    options: List[str] = Field(description="A list of options including distractors and the correct answer")
    correct_option_index: int = Field(description="The index of the correct option in the options list")
14
+
15
# Define the state of the graph
class AgentState(TypedDict):
    """State dict carried through the LangGraph workflow."""
    # Free-text description of the QCM (supplied by the API caller).
    qcm_description: str
    # Question texts already in the QCM; the LLM is told to avoid duplicating them.
    existing_questions: List[str]
    # Extracted text of an uploaded document, if any (may be None or empty).
    document_content: Optional[str]
    # Output slot filled by generate_question_node: parsed question dict, or None on failure.
    generated_question: Optional[dict]
21
+
22
# Initialize the LLM.
# Requires the GOOGLE_API_KEY environment variable to be set. If
# initialization fails (e.g. missing/invalid key), llm stays None and
# generate_question_node returns no question instead of crashing.
try:
    llm = ChatGoogleGenerativeAI(
        model="gemini-2.5-flash-lite",
        temperature=0.7,
        # Read the key from the environment: the original referenced a
        # commented-out module constant, which raised NameError here and
        # silently left llm = None on every run.
        api_key=os.environ.get("GOOGLE_API_KEY"),
    )
except Exception as e:
    # Keep the module importable even without credentials.
    print(f"Warning: Failed to initialize ChatGoogleGenerativeAI: {e}")
    llm = None
30
+
31
def generate_question_node(state: AgentState):
    """Generate a new, unique QCM question from the graph state.

    Reads the QCM description, the list of existing questions, and the
    optional document content; prompts the LLM for a JSON-formatted
    question matching QuestionOutput.

    Returns a partial state update: {"generated_question": dict} on
    success, or {"generated_question": None} if the LLM is unavailable
    or the call/parse fails.
    """
    # Module-level llm is None when initialization failed (no API key).
    if not llm:
        return {"generated_question": None}

    description = state["qcm_description"]
    existing = "\n".join(f"- {q}" for q in state["existing_questions"])

    parser = JsonOutputParser(pydantic_object=QuestionOutput)

    # AgentState declares document_content as Optional[str]; `or ""`
    # normalizes an explicit None (which .get(..., "") would pass through).
    document_content = state.get("document_content") or ""

    # Build the user prompt; the document section is included only when
    # there is actual content.
    prompt_text = "QCM Description: {description}\n\n"
    if document_content:
        prompt_text += "Document Content:\n{document_content}\n\n"
    prompt_text += "Existing Questions:\n{existing}\n\n"
    prompt_text += "Generate a new question:\n{format_instructions}"

    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are an expert teacher assistant helping to create a Multiple Choice Question (QCM). "
                   "Your goal is to generate a NEW, unique question based on the QCM description and the provided document content (if any). "
                   "Avoid duplicating any of the existing questions. "
                   "Provide the output in JSON format with 'question_text', 'options' (list of strings), and 'correct_option_index' (int)."),
        ("user", prompt_text),
    ])

    chain = prompt | llm | parser

    try:
        result = chain.invoke({
            "description": description,
            "existing": existing,
            "document_content": document_content,
            "format_instructions": parser.get_format_instructions(),
        })
        return {"generated_question": result}
    except Exception as e:
        # Best-effort fallback: log and signal failure to the caller
        # (the router turns a None result into an HTTP 500).
        print(f"Error generating question: {e}")
        return {"generated_question": None}
75
+
76
# Build the graph: a single-node workflow that runs generate_question_node
# once and terminates.
workflow = StateGraph(AgentState)
workflow.add_node("generate_question", generate_question_node)
workflow.set_entry_point("generate_question")
workflow.add_edge("generate_question", END)

# Compiled runnable graph; imported by routers/agent.py as `agent_app`.
app = workflow.compile()
main.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routers import agent

# FastAPI application entry point for the QCM API.
app = FastAPI()

# NOTE(review): wildcard origins together with allow_credentials=True is
# very permissive — consider restricting origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the /agent routes (question generation).
app.include_router(agent.router)

@app.get("/")
def read_root():
    """Landing/health endpoint."""
    return {"message": "Welcome to the QCM API"}
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ fastapi[standard]
2
+ pydantic
3
+ langgraph
4
+ langchain
5
+ langchain-openai
6
+ langchain-google-genai
7
+ pypdf
8
+ python-docx
9
+
routers/agent.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, HTTPException, UploadFile, File, Form
2
+ from pydantic import BaseModel
3
+ from typing import List, Optional
4
+ import json
5
+ import io
6
+ from pypdf import PdfReader
7
+ import docx
8
+ from agent.graph import app as agent_app
9
+
10
# Router for the question-generation agent endpoints, mounted under /agent
# by main.py.
router = APIRouter(
    prefix="/agent",
    tags=["agent"],
    responses={404: {"description": "Not found"}},
)
15
+
16
class GenerateQuestionRequest(BaseModel):
    """JSON request body for question generation.

    NOTE(review): not referenced by the generate_question endpoint in this
    file, which reads multipart Form fields instead — confirm whether this
    model is still needed.
    """
    description: str
    existing_questions: List[str]
19
+
20
class GenerateQuestionResponse(BaseModel):
    """Response schema: one generated multiple-choice question."""
    question_text: str
    options: List[str]
    correct_option_index: int
24
+
25
@router.post("/generate-question", response_model=GenerateQuestionResponse)
async def generate_question(
    description: str = Form(...),
    existing_questions: str = Form(default="[]"),
    file: Optional[UploadFile] = File(None)
):
    """Generate a new QCM question, optionally grounded in an uploaded document.

    Multipart form fields:
        description: free-text description of the QCM.
        existing_questions: JSON-encoded list of existing question texts.
        file: optional .pdf / .docx / .txt / .md document whose text is
            extracted and passed to the agent.

    Returns the generated question dict; raises HTTP 400 for malformed
    `existing_questions` JSON and HTTP 500 when generation fails.
    """
    try:
        existing_questions_list = json.loads(existing_questions)
    except json.JSONDecodeError as e:
        # Malformed client input is a 400, not a generic server error.
        raise HTTPException(status_code=400,
                            detail=f"existing_questions is not valid JSON: {e}")

    try:
        document_content = ""
        # file.filename can be None on some clients; guard before .lower().
        if file and file.filename:
            content = await file.read()
            filename = file.filename.lower()

            if filename.endswith(".pdf"):
                pdf = PdfReader(io.BytesIO(content))
                text = ""
                for page in pdf.pages:
                    # extract_text() may return None for image-only pages.
                    text += (page.extract_text() or "") + "\n"
                document_content = text
            elif filename.endswith(".docx"):
                doc = docx.Document(io.BytesIO(content))
                text = ""
                for para in doc.paragraphs:
                    text += para.text + "\n"
                document_content = text
            elif filename.endswith(".txt") or filename.endswith(".md"):
                document_content = content.decode("utf-8")

        inputs = {
            "qcm_description": description,
            "existing_questions": existing_questions_list,
            "document_content": document_content,
            "generated_question": None,
        }

        result = await agent_app.ainvoke(inputs)

        generated = result.get("generated_question")
        if not generated:
            raise HTTPException(status_code=500, detail="Failed to generate question")

        return generated
    except HTTPException:
        # Re-raise as-is: the generic handler below would otherwise re-wrap
        # the 500 raised above and replace its detail message.
        raise
    except Exception as e:
        print(f"Error in generate_question: {e}")
        raise HTTPException(status_code=500, detail=str(e))