# app.py
import logging
from contextlib import asynccontextmanager
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
# --- Configuration ---
# Your Hugging Face model repository
HF_REPO_ID = "rxmha125/RxCodexV1-mini"
# Use GPU if available (CUDA), otherwise fallback to CPU
MODEL_LOAD_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# --- Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- Global variables to hold the model and tokenizer ---
# These will be loaded during the application's startup.
model = None
tokenizer = None
# --- Application Lifespan (Model Loading) ---
# Runs once when the API starts (to load the model) and once when it shuts down (to release it).
@asynccontextmanager
async def lifespan(app: FastAPI):
    global model, tokenizer
    logger.info(f"API Startup: Loading model '{HF_REPO_ID}' to device '{MODEL_LOAD_DEVICE}'...")
    # Load the tokenizer from Hugging Face
    try:
        tokenizer = AutoTokenizer.from_pretrained(HF_REPO_ID)
        # Set a padding token if one isn't already defined (GPT-style models
        # often ship without one); reuse the EOS token so padding works
        if tokenizer.pad_token is None and tokenizer.eos_token:
            tokenizer.pad_token = tokenizer.eos_token
        logger.info("✅ Tokenizer loaded successfully.")
    except Exception as e:
        logger.error(f"❌ FATAL: Tokenizer loading failed: {e}")
    # Load the model from Hugging Face
    try:
        model = AutoModelForCausalLM.from_pretrained(HF_REPO_ID)
        model.to(MODEL_LOAD_DEVICE)  # Move the model to the chosen device (CPU or GPU)
        model.eval()  # Evaluation mode: disables dropout etc. for inference
        logger.info("✅ Model loaded successfully.")
    except Exception as e:
        logger.error(f"❌ FATAL: Model loading failed: {e}")
    # If either load failed, the globals stay None and /generate returns 503.
    yield  # The API is now running
    # --- Code below this line runs on shutdown ---
    logger.info("API shutting down.")
    model = None
    tokenizer = None
# --- Initialize FastAPI ---
# The 'lifespan' function is linked here to ensure the model loads on startup.
app = FastAPI(
    title="Rx Codex V1-mini Simple API",
    description="A simplified API for text generation without authentication.",
    lifespan=lifespan,
)
# --- Pydantic Models for API Data Validation ---
# Defines the structure of the incoming request body
class GenerationRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 50

# Defines the structure of the outgoing response
class GenerationResponse(BaseModel):
    generated_text: str
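
# For reference, a request/response pair matching these models (the values
# are illustrative only):
#
#   Request body:  {"prompt": "Once upon a time", "max_new_tokens": 50}
#   Response body: {"generated_text": "there was a small API..."}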
# --- API Endpoints ---
@app.get("/")
def root():
    """A simple endpoint to check if the API is running."""
    status = "loaded" if model is not None and tokenizer is not None else "not loaded"
    return {"message": "Rx Codex API is running", "model_status": status}
@app.post("/generate", response_model=GenerationResponse)
def generate_text(request: GenerationRequest):
    """The main endpoint to generate text from a prompt.

    Declared as a plain `def` so FastAPI runs it in a worker thread and the
    blocking `model.generate` call does not stall the event loop.
    """
    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="Model is not ready. Please try again later.")
    logger.info(f"Received generation request for prompt: '{request.prompt}'")
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(request.prompt, return_tensors="pt", padding=True, truncation=True)
    input_ids = inputs["input_ids"].to(MODEL_LOAD_DEVICE)
    attention_mask = inputs["attention_mask"].to(MODEL_LOAD_DEVICE)
    # Generate text using the model (greedy decoding by default; pass
    # do_sample/temperature/top_p here to enable sampling)
    with torch.no_grad():
        output_sequences = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_new_tokens=request.max_new_tokens,
            pad_token_id=tokenizer.pad_token_id,
        )
    # Decode only the newly generated tokens; this is more reliable than
    # stripping the prompt string from the decoded output
    new_tokens = output_sequences[0][input_ids.shape[-1]:]
    generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    logger.info("Generation complete.")
    return GenerationResponse(generated_text=generated_text)
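
# Example client call (a minimal sketch; assumes the server is reachable at
# http://localhost:8000 and the `requests` package is installed):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/generate",
#       json={"prompt": "Hello, world", "max_new_tokens": 20},
#       timeout=120,
#   )
#   resp.raise_for_status()
#   print(resp.json()["generated_text"])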
# --- Uvicorn Runner ---
# This allows you to run the app directly with 'python app.py'
if __name__ == "__main__":
    import uvicorn
    logger.info("Starting API via Uvicorn...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
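
# Alternatively, start it with the uvicorn CLI (add --reload during development):
#   uvicorn app:app --host 0.0.0.0 --port 8000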