# Provenance: uploaded by matsuap via huggingface_hub (revision 951d5c6, verified).
import json
import logging
import os
import asyncio
import tempfile
from typing import List, Dict, Optional, Any, Callable
import openai
from core.config import settings
from core.prompts import get_quiz_system_prompt
from services.s3_service import s3_service
logger = logging.getLogger(__name__)
class QuizService:
    """Generates multiple-choice quizzes with OpenAI chat completions.

    Input is either a PDF stored on S3 (referenced by ``file_key``) or raw
    text (``text_input``). All blocking network and file I/O is pushed off
    the event loop with ``asyncio.to_thread``.
    """

    # Model used for every quiz-generation completion.
    _MODEL = "gpt-4o-mini"

    # Requested quiz size -> "lo-hi" question-count range substituted into
    # the system prompt and the user message.
    _COUNT_RANGES = {
        "FEWER": "5-10",
        "STANDARD": "10-15",
        "MORE": "20-25",
    }
    _DEFAULT_COUNT_RANGE = "10-15"

    def __init__(self):
        # Synchronous client; every call site wraps it in asyncio.to_thread.
        self.openai_client = openai.OpenAI(api_key=settings.OPENAI_API_KEY)

    @classmethod
    def _count_range(cls, count_mode: str) -> str:
        """Map a count mode (FEWER/STANDARD/MORE) to a question-count range.

        Unknown modes fall back to the STANDARD range, matching the original
        ``dict.get`` default.
        """
        return cls._COUNT_RANGES.get(count_mode, cls._DEFAULT_COUNT_RANGE)

    @staticmethod
    def _build_user_message(
        source_label: str,
        num_range: str,
        difficulty: str,
        topic: Optional[str],
    ) -> str:
        """Compose the instruction prompt shared by the PDF and text flows.

        ``source_label`` is the word naming the input ("PDF" or "text").
        """
        message = f"Analyze the {source_label} and create {num_range} questions. Difficulty: {difficulty}."
        if topic:
            message += f" Topic: {topic}."
        return message

    async def _complete(self, messages: List[Dict[str, Any]]) -> str:
        """Run a JSON-mode chat completion in a worker thread.

        Returns the raw JSON string from the first choice.
        """
        response = await asyncio.to_thread(
            self.openai_client.chat.completions.create,
            model=self._MODEL,
            messages=messages,
            response_format={"type": "json_object"},
            temperature=0.7
        )
        return response.choices[0].message.content

    async def _generate_from_pdf(
        self,
        file_key: str,
        system_prompt: str,
        num_range: str,
        difficulty: str,
        topic: Optional[str],
        progress_callback: Optional[Callable[[int, str], None]],
    ) -> str:
        """Download the S3 PDF, attach it to a completion, return raw JSON.

        Both the local temp file and the uploaded OpenAI file are cleaned up
        even when the completion raises (previously the OpenAI file leaked
        on error because the delete ran only on the success path).
        """
        if progress_callback:
            progress_callback(15, "Downloading file from S3...")
        # delete=False so the worker thread can reopen the path by name.
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
        tmp_path = tmp.name
        tmp.close()
        try:
            await asyncio.to_thread(
                s3_service.s3_client.download_file,
                settings.AWS_S3_BUCKET,
                file_key,
                tmp_path
            )
            if progress_callback:
                progress_callback(30, "Uploading to OpenAI...")

            def upload_to_openai():
                # Runs in a worker thread: open + upload are both blocking.
                with open(tmp_path, "rb") as f:
                    return self.openai_client.files.create(
                        file=f,
                        purpose="assistants"
                    )

            uploaded_file = await asyncio.to_thread(upload_to_openai)
            try:
                if progress_callback:
                    progress_callback(45, "Generating quiz questions...")
                user_message = self._build_user_message("PDF", num_range, difficulty, topic)
                messages = [
                    {"role": "system", "content": system_prompt},
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": user_message},
                            {
                                "type": "file",
                                "file": {"file_id": uploaded_file.id}
                            }
                        ]
                    }
                ]
                raw_content = await self._complete(messages)
            finally:
                if progress_callback:
                    progress_callback(75, "Cleaning up...")
                # Best-effort: a cleanup failure must not mask a completion
                # error, so log and continue.
                try:
                    await asyncio.to_thread(
                        self.openai_client.files.delete,
                        uploaded_file.id
                    )
                except Exception as cleanup_err:
                    logger.warning(
                        "Failed to delete OpenAI file %s: %s",
                        uploaded_file.id, cleanup_err
                    )
            return raw_content
        finally:
            if os.path.exists(tmp_path):
                await asyncio.to_thread(os.remove, tmp_path)

    async def _generate_from_text(
        self,
        text_input: str,
        system_prompt: str,
        num_range: str,
        difficulty: str,
        topic: Optional[str],
        progress_callback: Optional[Callable[[int, str], None]],
    ) -> str:
        """Generate quiz JSON directly from caller-supplied text."""
        if progress_callback:
            progress_callback(20, "Generating quiz questions...")
        user_message = self._build_user_message("text", num_range, difficulty, topic)
        user_message += f"\n\nText content:\n{text_input}"
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message}
        ]
        return await self._complete(messages)

    async def generate_quiz(
        self,
        file_key: Optional[str] = None,
        text_input: Optional[str] = None,
        difficulty: str = "medium",
        topic: Optional[str] = None,
        language: str = "English",
        count_mode: str = "STANDARD",
        progress_callback: Optional[Callable[[int, str], None]] = None
    ) -> List[Dict[str, Any]]:
        """
        Generates a quiz from either an S3 PDF or direct text input.
        Uses asyncio.to_thread for all blocking I/O operations.

        Args:
            file_key: S3 object key of a PDF to quiz from (takes precedence).
            text_input: Raw text to quiz from when no file_key is given.
            difficulty: Free-form difficulty label embedded in the prompt.
            topic: Optional topic constraint embedded in the prompt.
            language: Language passed to the system-prompt template.
            count_mode: FEWER / STANDARD / MORE question-count selector.
            progress_callback: Optional (percent, message) progress hook.

        Returns:
            The list under the model's "quizzes" key (empty if absent).

        Raises:
            ValueError: if neither file_key nor text_input is provided.
            Exception: any I/O or API failure is logged and re-raised.
        """
        try:
            if progress_callback:
                progress_callback(5, "Preparing quiz generation...")
            num_range = self._count_range(count_mode)
            system_prompt = get_quiz_system_prompt(language).replace("{NUM_QUESTIONS}", num_range)
            if file_key:
                raw_content = await self._generate_from_pdf(
                    file_key, system_prompt, num_range, difficulty, topic, progress_callback
                )
            elif text_input:
                raw_content = await self._generate_from_text(
                    text_input, system_prompt, num_range, difficulty, topic, progress_callback
                )
            else:
                raise ValueError("Either file_key or text_input must be provided")
            if progress_callback:
                progress_callback(85, "Parsing results...")
            data = json.loads(raw_content)
            # Prompt contract: the model returns {"quizzes": [...]}.
            return data.get("quizzes", [])
        except Exception as e:
            logger.error(f"Quiz generation failed: {e}")
            raise
# Module-level singleton shared by importers of this service.
quiz_service = QuizService()