import os
import logging
from typing import List, Optional, Dict, Any
from contextlib import asynccontextmanager
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import uvicorn
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
import gc
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global variables for model and tokenizer
model = None
tokenizer = None
device = None
class QuestionGenerationRequest(BaseModel):
statement: str = Field(..., description="The input statement to generate questions from")
num_questions: int = Field(default=5, ge=1, le=10, description="Number of questions to generate (1-10)")
temperature: float = Field(default=0.8, ge=0.1, le=2.0, description="Temperature for generation (0.1-2.0)")
    max_length: int = Field(default=2048, ge=100, le=4096, description="Maximum number of new tokens to generate")
difficulty_level: str = Field(default="mixed", description="Difficulty level: easy, medium, hard, or mixed")
class QuestionGenerationResponse(BaseModel):
questions: List[str]
statement: str
metadata: Dict[str, Any]
class HealthResponse(BaseModel):
status: str
model_loaded: bool
device: str
memory_usage: Dict[str, float]
async def load_model():
"""Load the model and tokenizer"""
global model, tokenizer, device
try:
logger.info("Starting model loading...")
# Check if CUDA is available
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")
if device == "cuda":
logger.info(f"GPU: {torch.cuda.get_device_name()}")
logger.info(f"VRAM Available: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
model_name = "DavidAU/Llama-3.1-1-million-ctx-DeepHermes-Deep-Reasoning-8B-GGUF"
model_file = "Llama-3.1-1-million-ctx-DeepHermes-Deep-Reasoning-8B-Q4_K_M.gguf"
# Use transformers library instead of llama-cpp-python
try:
            # AutoTokenizer / AutoModelForCausalLM are already imported at module level
logger.info("Loading model with transformers...")
# Use Llama 3.1 8B Instruct (user now has access)
base_model_name = "meta-llama/Llama-3.1-8B-Instruct"
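            # meta-llama checkpoints are gated on the Hugging Face Hub, so the
            # HF_TOKEN read below must belong to an account that has accepted
            # the model's license; otherwise from_pretrained will fail.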
# Get HF token from environment
hf_token = os.getenv("HF_TOKEN")
tokenizer = AutoTokenizer.from_pretrained(
base_model_name,
use_fast=True,
trust_remote_code=True,
token=hf_token
)
model = AutoModelForCausalLM.from_pretrained(
base_model_name,
torch_dtype=torch.float16 if device == "cuda" else torch.float32,
device_map="auto" if device == "cuda" else None,
trust_remote_code=True,
low_cpu_mem_usage=True,
use_safetensors=True, # Force safetensors to avoid CVE-2025-32434 (PyTorch 2.5.0 vulnerable to torch.load RCE)
token=hf_token
)
            # device_map="auto" already places the weights on the GPU when CUDA
            # is available, so no explicit model.to(device) call is needed here
            # (calling .to() on an accelerate-dispatched model can fail).
logger.info("Model loaded successfully with transformers!")
except Exception as e:
logger.error(f"Error loading model with transformers: {str(e)}")
raise # Re-raise the error to stop startup if primary model fails
except Exception as e:
logger.error(f"Error loading model: {str(e)}")
raise
async def unload_model():
"""Clean up model from memory"""
global model, tokenizer
try:
if model is not None:
del model
if tokenizer is not None:
del tokenizer
# Clear CUDA cache if available
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Force garbage collection
gc.collect()
logger.info("Model unloaded successfully")
except Exception as e:
logger.error(f"Error unloading model: {str(e)}")
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Manage application lifespan"""
# Startup
logger.info("Starting up...")
await load_model()
yield
# Shutdown
logger.info("Shutting down...")
await unload_model()
# Create FastAPI app
app = FastAPI(
title="Question Generation API",
description="API for generating questions from statements using DeepHermes reasoning model",
version="1.0.0",
lifespan=lifespan
)
# Add CORS middleware
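# Note: wildcard origins combined with allow_credentials=True are rejected by
# browsers for credentialed requests; this permissive setup is fine for a demo
# Space but should be tightened for production use.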
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
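# The prompt below follows the DeepHermes-style convention of a system prompt
# that asks the model to reason inside <think> ... </think> tags; the numbered
# questions are expected after the reasoning block and are recovered later by
# extract_questions().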
def create_question_prompt(statement: str, num_questions: int, difficulty_level: str) -> str:
"""Create a prompt for question generation with reasoning"""
difficulty_instruction = {
"easy": "Generate simple, straightforward questions that test basic understanding.",
"medium": "Generate questions that require some analysis and comprehension.",
"hard": "Generate complex questions that require deep thinking and reasoning.",
"mixed": "Generate a mix of easy, medium, and hard questions."
}
system_prompt = """You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem.
You are an expert educator and question generator. Your task is to create thoughtful, well-crafted questions from given statements."""
user_prompt = f"""<think>
I need to analyze this statement and generate {num_questions} high-quality questions. Let me think about:
1. The key concepts and information in the statement
2. Different types of questions I can ask (factual, analytical, inferential, evaluative)
3. The difficulty level requested: {difficulty_level}
4. How to make questions that promote understanding and critical thinking
</think>
Based on the following statement, generate exactly {num_questions} questions.
Statement: "{statement}"
Requirements:
- {difficulty_instruction.get(difficulty_level, difficulty_instruction['mixed'])}
- Questions should be clear, well-formed, and grammatically correct
- Vary the question types (what, how, why, when, where, etc.)
- Each question should test different aspects of the statement
- Make questions engaging and thought-provoking
- Number each question (1., 2., 3., etc.)
Generate the questions now:"""
return f"{system_prompt}\n\n{user_prompt}"
def extract_questions(generated_text: str) -> List[str]:
"""Extract questions from the generated text"""
questions = []
lines = generated_text.split('\n')
for line in lines:
line = line.strip()
# Look for numbered questions
if line and (line[0].isdigit() or line.startswith('Q')):
# Remove numbering and clean up
question = line
# Remove common prefixes
for prefix in ['1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.', '10.', 'Q1:', 'Q2:', 'Q3:', 'Q4:', 'Q5:', 'Question 1:', 'Question 2:', 'Question 3:', 'Question 4:', 'Question 5:']:
if question.startswith(prefix):
question = question[len(prefix):].strip()
break
if question and question.endswith('?'):
questions.append(question)
# If no numbered questions found, try to extract any questions
if not questions:
for line in lines:
line = line.strip()
if line.endswith('?') and len(line) > 10:
questions.append(line)
return questions
@app.get("/health", response_model=HealthResponse)
async def health_check():
"""Health check endpoint"""
global model
memory_usage = {}
if torch.cuda.is_available():
memory_usage = {
"allocated_gb": torch.cuda.memory_allocated() / 1024**3,
"reserved_gb": torch.cuda.memory_reserved() / 1024**3,
"total_gb": torch.cuda.get_device_properties(0).total_memory / 1024**3
}
return HealthResponse(
status="healthy" if model is not None else "unhealthy",
model_loaded=model is not None,
device=device if device else "unknown",
memory_usage=memory_usage
)
@app.post("/generate-questions", response_model=QuestionGenerationResponse)
async def generate_questions(request: QuestionGenerationRequest):
"""Generate questions from a statement"""
global model
if model is None:
raise HTTPException(status_code=503, detail="Model not loaded")
try:
logger.info(f"Generating {request.num_questions} questions for statement: {request.statement[:100]}...")
# Create prompt
prompt = create_question_prompt(
request.statement,
request.num_questions,
request.difficulty_level
)
        # Generate response using transformers; the full tokenizer call also
        # returns an attention mask, which generate() can use directly.
        encoded = tokenizer(prompt, return_tensors="pt")
        input_ids = encoded["input_ids"]
        attention_mask = encoded["attention_mask"]
        if device == "cuda":
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
with torch.no_grad():
            outputs = model.generate(
                input_ids,
                attention_mask=attention_mask,
max_new_tokens=request.max_length,
temperature=request.temperature,
top_p=0.95,
top_k=40,
repetition_penalty=1.1,
do_sample=True,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
)
        # Decode only the newly generated tokens; slicing the decoded string by
        # len(prompt) is fragile because detokenization does not always
        # reproduce the prompt text exactly.
        generated_text = tokenizer.decode(
            outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
        ).strip()
logger.info(f"Generated text length: {len(generated_text)}")
# Extract questions from the generated text
questions = extract_questions(generated_text)
# Ensure we have the requested number of questions
if len(questions) < request.num_questions:
logger.warning(f"Only extracted {len(questions)} questions, requested {request.num_questions}")
# Limit to requested number
questions = questions[:request.num_questions]
# If we still don't have enough questions, add a fallback
while len(questions) < request.num_questions:
questions.append(f"What is the main point of this statement: '{request.statement[:100]}...'?")
metadata = {
"model": "DavidAU/Llama-3.1-1-million-ctx-DeepHermes-Deep-Reasoning-8B-GGUF",
"temperature": request.temperature,
"difficulty_level": request.difficulty_level,
"generated_text_length": len(generated_text),
"questions_extracted": len(questions)
}
logger.info(f"Successfully generated {len(questions)} questions")
return QuestionGenerationResponse(
questions=questions,
statement=request.statement,
metadata=metadata
)
except Exception as e:
logger.error(f"Error generating questions: {str(e)}")
raise HTTPException(status_code=500, detail=f"Error generating questions: {str(e)}")
@app.get("/")
async def root():
"""Root endpoint with basic info"""
return {
"message": "Question Generation API",
"model": "DavidAU/Llama-3.1-1-million-ctx-DeepHermes-Deep-Reasoning-8B-GGUF",
"endpoints": {
"health": "/health",
"generate": "/generate-questions",
"docs": "/docs"
}
}
if __name__ == "__main__":
uvicorn.run(
"app:app",
host="0.0.0.0",
port=7860,
reload=False
)
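# Example client call (a minimal sketch; the payload values are illustrative):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/generate-questions",
#       json={"statement": "Water boils at 100 degrees Celsius at sea level.",
#             "num_questions": 3, "difficulty_level": "easy"},
#   )
#   print(resp.json()["questions"])
#
# The JSON response contains the extracted "questions", the echoed
# "statement", and a "metadata" block describing the generation settings.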