feat: add quiz generator endpoint
Browse files- Backend/app/api/v1/endpoints/prompts.py +4 -7
- Backend/app/api/v1/endpoints/quiz.py +43 -34
- Backend/app/config.py +2 -0
- Backend/app/llm.py +35 -0
- Backend/app/main.py +2 -0
- Backend/app/schema/__init__.py +2 -2
- Backend/app/schema/models.py +12 -2
- Backend/app/services/inital_data.py +0 -14
Backend/app/api/v1/endpoints/prompts.py
CHANGED
|
@@ -1,12 +1,9 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
SYSTEM_PROMPT = """
|
| 4 |
You are an AI question-generation agent.
|
| 5 |
Your task is to generate a batch of 10 high-quality MCQ questions strictly based on the following inputs:
|
| 6 |
|
| 7 |
- {parsed_info}
|
| 8 |
- {user_prompt}
|
| 9 |
-
- {mcq_style}
|
| 10 |
- {retrieved_docs}
|
| 11 |
|
| 12 |
-----------------------
|
|
@@ -25,8 +22,8 @@ GENERATION RULES
|
|
| 25 |
-----------------------
|
| 26 |
REQUIRED JSON FORMAT FOR EACH QUESTION
|
| 27 |
-----------------------
|
| 28 |
-
{
|
| 29 |
-
"
|
| 30 |
"options": [
|
| 31 |
"rm",
|
| 32 |
"mv",
|
|
@@ -34,9 +31,9 @@ REQUIRED JSON FORMAT FOR EACH QUESTION
|
|
| 34 |
"none of the mentioned"
|
| 35 |
],
|
| 36 |
"answer": "b",
|
| 37 |
-
"explanation": "
|
| 38 |
"User_response": ""
|
| 39 |
-
}
|
| 40 |
|
| 41 |
-----------------------
|
| 42 |
ANSWER KEY RULES
|
|
|
|
|
|
|
|
|
|
| 1 |
SYSTEM_PROMPT = """
|
| 2 |
You are an AI question-generation agent.
|
| 3 |
Your task is to generate a batch of 10 high-quality MCQ questions strictly based on the following inputs:
|
| 4 |
|
| 5 |
- {parsed_info}
|
| 6 |
- {user_prompt}
|
|
|
|
| 7 |
- {retrieved_docs}
|
| 8 |
|
| 9 |
-----------------------
|
|
|
|
| 22 |
-----------------------
|
| 23 |
REQUIRED JSON FORMAT FOR EACH QUESTION
|
| 24 |
-----------------------
|
| 25 |
+
{{
|
| 26 |
+
"questions": "Which of the following CLI command can also be used to rename files?",
|
| 27 |
"options": [
|
| 28 |
"rm",
|
| 29 |
"mv",
|
|
|
|
| 31 |
"none of the mentioned"
|
| 32 |
],
|
| 33 |
"answer": "b",
|
| 34 |
+
"explanation": "mv stands for move.",
|
| 35 |
"User_response": ""
|
| 36 |
+
}}
|
| 37 |
|
| 38 |
-----------------------
|
| 39 |
ANSWER KEY RULES
|
Backend/app/api/v1/endpoints/quiz.py
CHANGED
|
@@ -1,56 +1,65 @@
|
|
| 1 |
-
from fastapi import APIRouter, Depends, HTTPException
|
| 2 |
from chromadb import AsyncHttpClient
|
| 3 |
from app.models import User
|
| 4 |
from app.api.deps import get_db, get_current_user, get_chroma_client
|
| 5 |
-
from app.schema import Quiz_input
|
| 6 |
from .prompts import SYSTEM_PROMPT
|
| 7 |
-
|
| 8 |
from fastapi import APIRouter, Depends, HTTPException
|
| 9 |
from chromadb.api.models.Collection import Collection # Import Collection type
|
| 10 |
from app.api.deps import get_chroma_collection
|
|
|
|
|
|
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
-
@router.get("/")
|
| 16 |
async def search_documents(
|
| 17 |
-
query: str,
|
| 18 |
-
|
| 19 |
-
collection: Collection = Depends(get_chroma_collection)
|
| 20 |
):
|
| 21 |
try:
|
| 22 |
-
|
| 23 |
-
results = await collection.query(
|
| 24 |
-
query_texts=[query],
|
| 25 |
-
n_results=5
|
| 26 |
-
)
|
| 27 |
-
return results
|
| 28 |
except Exception as e:
|
| 29 |
-
raise HTTPException(
|
| 30 |
-
|
| 31 |
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
# if Input_model.parsed_doc and Input_model.user_prompt and Input_model.choice:
|
| 41 |
-
# prompt = prompt_builder(Input_model.parsed_doc, Input_model.user_prompt, Input_model.choice)
|
| 42 |
|
|
|
|
| 43 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
# #--------Helper Functions--------#
|
| 46 |
|
| 47 |
-
# def get_embed()
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
# )
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException, status
|
| 2 |
from chromadb import AsyncHttpClient
|
| 3 |
from app.models import User
|
| 4 |
from app.api.deps import get_db, get_current_user, get_chroma_client
|
| 5 |
+
from app.schema import Quiz_input, QuizOutput
|
| 6 |
from .prompts import SYSTEM_PROMPT
|
|
|
|
| 7 |
from fastapi import APIRouter, Depends, HTTPException
|
| 8 |
from chromadb.api.models.Collection import Collection # Import Collection type
|
| 9 |
from app.api.deps import get_chroma_collection
|
| 10 |
+
from app.llm import call_llm
|
| 11 |
+
router = APIRouter(prefix="/quiz")
|
| 12 |
|
| 13 |
+
async def search_logic(query: str, collection: Collection):
    """Retrieve the five nearest documents for *query* and concatenate them.

    Returns a single string built from the document texts of the first (and
    only) query; an empty string means nothing was retrieved.
    """
    hits = await collection.query(query_texts=[query], n_results=5)
    # Chroma returns one list of documents per query text; we sent exactly one.
    docs_for_query = hits['documents'][0]
    return ''.join(docs_for_query)
|
| 19 |
|
| 20 |
+
@router.get("/search_docs")
|
| 21 |
async def search_documents(
|
| 22 |
+
query: str,
|
| 23 |
+
collection: Collection = Depends(get_chroma_collection)
|
|
|
|
| 24 |
):
|
| 25 |
try:
|
| 26 |
+
return await search_logic(query, collection)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
except Exception as e:
|
| 28 |
+
raise HTTPException(500, f"ChromaDB Query Error: {e}")
|
|
|
|
| 29 |
|
| 30 |
|
| 31 |
+
@router.post("/", response_model=QuizOutput, status_code=status.HTTP_201_CREATED)
|
| 32 |
+
async def generate_quiz(
|
| 33 |
+
Input_model: Quiz_input,
|
| 34 |
+
collection: Collection = Depends(get_chroma_collection),
|
| 35 |
+
current_user: User = Depends(get_current_user)
|
| 36 |
+
):
|
| 37 |
+
try:
|
| 38 |
+
query = Input_model.parsed_doc + Input_model.user_prompt
|
| 39 |
+
retrieved_context = await search_logic(query, collection)
|
| 40 |
+
|
| 41 |
|
| 42 |
+
if not retrieved_context:
|
| 43 |
+
raise ValueError("No context available to generate quiz.")
|
| 44 |
+
prompt = await prompt_builder(Input_model.parsed_doc, Input_model.user_prompt, retrieved_context)
|
| 45 |
+
|
| 46 |
+
quiz_data_obj = await call_llm(prompt)
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
+
return quiz_data_obj
|
| 49 |
|
| 50 |
+
except Exception as e:
|
| 51 |
+
raise HTTPException(
|
| 52 |
+
status_code=status.HTTP_400_BAD_REQUEST,
|
| 53 |
+
detail=f'Invalid Input: {str(e)}'
|
| 54 |
+
)
|
| 55 |
|
| 56 |
# #--------Helper Functions--------#
|
| 57 |
|
|
|
|
| 58 |
|
| 59 |
+
async def prompt_builder(parsed_doc:str, user_prompt:str, docs:str=None):
    """Render SYSTEM_PROMPT with the parsed document, user prompt and retrieved context."""
    return SYSTEM_PROMPT.format(
        parsed_info=parsed_doc,
        user_prompt=user_prompt,
        retrieved_docs=docs,
    )
|
|
|
Backend/app/config.py
CHANGED
|
@@ -16,6 +16,8 @@ class Settings(BaseSettings):
|
|
| 16 |
chroma_port: int
|
| 17 |
chroma_collection: str
|
| 18 |
|
|
|
|
|
|
|
| 19 |
class Config:
|
| 20 |
env_file = ".env"
|
| 21 |
extra = "ignore" # quiz
|
|
|
|
| 16 |
chroma_port: int
|
| 17 |
chroma_collection: str
|
| 18 |
|
| 19 |
+
GEMINI_API_KEY:str
|
| 20 |
+
|
| 21 |
class Config:
|
| 22 |
env_file = ".env"
|
| 23 |
extra = "ignore" # quiz
|
Backend/app/llm.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import json
import asyncio
from openai import OpenAI
from pydantic import BaseModel, Field
from typing import List, Optional, Any
from app.schema.models import QuizOutput, QuizQuestion
from app.config import settings

# Gemini exposed through its OpenAI-compatible endpoint.
# SECURITY: never hard-code the API key — the key previously committed here is
# public history and must be revoked/rotated. It is now read from Settings
# (GEMINI_API_KEY, loaded from .env).
client = OpenAI(
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    api_key=settings.GEMINI_API_KEY,
)

async def call_llm(prompt: str) -> QuizOutput:
    """Send *prompt* to Gemini and parse the JSON reply into a QuizOutput.

    The SDK call is synchronous, so it is pushed onto a worker thread to keep
    the event loop responsive. Raises whatever the API client or Pydantic
    validation raises; callers translate to HTTP errors.
    """
    try:
        response = await asyncio.to_thread(
            client.chat.completions.create,
            model="models/gemini-2.0-flash",
            messages=[
                {"role": "user", "content": prompt}
            ],
            # Ask the model for a pure-JSON body.
            response_format={"type": "json_object"},
            temperature=0.7,
        )

        json_string = response.choices[0].message.content
        quiz_data = json.loads(json_string)
        # The model returns the bare question payload; wrap it under "quiz"
        # to match the QuizOutput schema.
        wrapped_data = {"quiz": quiz_data}
        return QuizOutput.model_validate(wrapped_data)

    except Exception as e:
        print(f"Error calling LiteLLM/Gemini: {e}")
        # Bare raise preserves the original traceback.
        raise
|
Backend/app/main.py
CHANGED
|
@@ -7,7 +7,9 @@ from app.database import engine, Base
|
|
| 7 |
from app.api.v1.api import api_router
|
| 8 |
import chromadb
|
| 9 |
from chromadb.api.models.Collection import Collection
|
|
|
|
| 10 |
|
|
|
|
| 11 |
|
| 12 |
@asynccontextmanager
|
| 13 |
async def lifespan(app: FastAPI):
|
|
|
|
| 7 |
from app.api.v1.api import api_router
|
| 8 |
import chromadb
|
| 9 |
from chromadb.api.models.Collection import Collection
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
|
| 12 |
+
load_dotenv()
|
| 13 |
|
| 14 |
@asynccontextmanager
|
| 15 |
async def lifespan(app: FastAPI):
|
Backend/app/schema/__init__.py
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
-
from app.schema.models import StudentCreate, StudentUpdate, StudentResponse, UserCreate, Token, LoginRequest, Quiz_input
|
| 2 |
|
| 3 |
-
__all__ = ["StudentCreate", "StudentUpdate", "StudentResponse", "UserCreate", "Token", "LoginRequest", "Quiz_input"]
|
|
|
|
| 1 |
+
from app.schema.models import StudentCreate, StudentUpdate, StudentResponse, UserCreate, Token, LoginRequest, Quiz_input, QuizOutput
|
| 2 |
|
| 3 |
+
__all__ = ["StudentCreate", "StudentUpdate", "StudentResponse", "UserCreate", "Token", "LoginRequest", "Quiz_input", "QuizOutput"]
|
Backend/app/schema/models.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
from pydantic import BaseModel, EmailStr, Field, field_validator, ConfigDict
|
| 2 |
-
from typing import Optional, Literal
|
| 3 |
from datetime import datetime
|
| 4 |
|
| 5 |
class StudentBase(BaseModel):
|
|
@@ -51,4 +51,14 @@ class LoginResponse(Token):
|
|
| 51 |
class Quiz_input(BaseModel):
|
| 52 |
parsed_doc: str
|
| 53 |
user_prompt: str
|
| 54 |
-
choice: Literal["mcq", "code"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from pydantic import BaseModel, EmailStr, Field, field_validator, ConfigDict
|
| 2 |
+
from typing import Optional, Literal, List
|
| 3 |
from datetime import datetime
|
| 4 |
|
| 5 |
class StudentBase(BaseModel):
|
|
|
|
| 51 |
class Quiz_input(BaseModel):
    """Request body for quiz generation: the parsed document text plus the user's instructions."""
    parsed_doc: str
    user_prompt: str
    # choice: Literal["mcq", "code"]
|
| 55 |
+
|
| 56 |
+
class QuizQuestion(BaseModel):
    """A single generated MCQ item, mirroring the JSON schema in SYSTEM_PROMPT."""
    # Key name is plural to match the LLM's JSON output ("questions").
    questions: str
    # Pydantic v2: min_length/max_length replace the deprecated min_items/max_items.
    options: List[str] = Field(..., min_length=4, max_length=4)
    answer: str = Field(..., description="The correct answer key (e.g., 'a', 'b', 'c', or 'd')")
    explanation: str
    # Accepts the LLM's "User_response" key; defaults to empty until the student answers.
    user_response: str = Field("", alias="User_response")
|
| 62 |
+
|
| 63 |
+
class QuizOutput(BaseModel):
    """Response model for the quiz endpoint: the full batch of generated questions."""
    quiz: List[QuizQuestion] = Field(..., description="A list of 10 generated MCQ questions.")
|
Backend/app/services/inital_data.py
DELETED
|
@@ -1,14 +0,0 @@
|
|
| 1 |
-
from chromadb import AsyncHttpClient
|
| 2 |
-
from chromadb.utils import embedding_functions
|
| 3 |
-
|
| 4 |
-
async def ingest_start(client: AsyncHttpClient, collection_name:str):
|
| 5 |
-
|
| 6 |
-
try:
|
| 7 |
-
await client.get_collection(name=collection_name)
|
| 8 |
-
print(f"Collection '{collection_name}' already exists. Skipping initial ingestion.")
|
| 9 |
-
return
|
| 10 |
-
except Exception:
|
| 11 |
-
print(f"Collection '{collection_name}' not found. Starting initial ingestion...")
|
| 12 |
-
pass
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|