# main.py

import logging
from contextlib import asynccontextmanager
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM

# --- Configuration ---
# The repository ID for your model on the Hugging Face Hub
HF_REPO_ID = "rxmha125/Rx_Codex_V1_Tiny_test"
# Use GPU if available (CUDA), otherwise fallback to CPU
MODEL_LOAD_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# --- Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Global variables to hold the model and tokenizer ---
model = None
tokenizer = None

# --- Application Lifespan (Model Loading) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    global model, tokenizer
    logger.info(f"API Startup: Loading model '{HF_REPO_ID}' to device '{MODEL_LOAD_DEVICE}'...")
    
    # Load the tokenizer from the Hub
    try:
        tokenizer = AutoTokenizer.from_pretrained(HF_REPO_ID)
        logger.info("✅ Tokenizer loaded successfully.")
    except Exception as e:
        logger.error(f"❌ FATAL: Tokenizer loading failed: {e}")
        # In a real app you might want to handle this more gracefully.
        # Because the exception is caught, the app still starts; the endpoints
        # below will report the model as not loaded (or return 503) until it is fixed.

    # Load the model from the Hub
    try:
        model = AutoModelForCausalLM.from_pretrained(HF_REPO_ID)
        model.to(MODEL_LOAD_DEVICE)
        model.eval() # Set to evaluation mode for inference
        logger.info("✅ Model loaded successfully.")
    except Exception as e:
        logger.error(f"❌ FATAL: Model loading failed: {e}")

    yield # The API is now running

    # --- Code below this line runs on shutdown ---
    logger.info("API Shutting down.")
    model = None
    tokenizer = None


# --- Initialize FastAPI ---
app = FastAPI(
    title="Rx Codex V1-Tiny API",
    description="An API for generating text with the Rx_Codex_V1_Tiny model.",
    lifespan=lifespan
)
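
# Note: FastAPI automatically serves interactive API docs for the routes below
# at /docs (Swagger UI) and /redoc.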

# --- Pydantic Models for API Data Validation ---
class GenerationRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 150
    temperature: float = 0.7
    top_k: int = 50

class GenerationResponse(BaseModel):
    generated_text: str

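# Example request body for POST /generate (illustrative values only):
# {
#     "prompt": "Write a short poem about the ocean.",
#     "max_new_tokens": 100,
#     "temperature": 0.7,
#     "top_k": 50
# }
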
# --- API Endpoints ---
@app.get("/")
def root():
    """A simple endpoint to check if the API is running."""
    status = "loaded" if model and tokenizer else "not loaded"
    return {"message": "Rx Codex V1-Tiny API is running", "model_status": status}

@app.post("/generate", response_model=GenerationResponse)
async def generate_text(request: GenerationRequest):
    """The main endpoint to generate text from a prompt."""
    if not model or not tokenizer:
        raise HTTPException(status_code=503, detail="Model is not ready. Please try again later.")
    
    logger.info(f"Received generation request for prompt: '{request.prompt}'")
    
    # --- CRITICAL: Format the prompt correctly for the model ---
    formatted_prompt = f"### Human:\n{request.prompt}\n\n### Assistant:"
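    # For example, the prompt "Hello" becomes:
    # "### Human:\nHello\n\n### Assistant:"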
    
    # Prepare the input text for the model
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(MODEL_LOAD_DEVICE)
    
    # Generate text using the model
    with torch.no_grad():
        output_sequences = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=request.max_new_tokens,
            temperature=request.temperature,
            top_k=request.top_k,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    
    # Decode only the newly generated tokens, skipping the prompt tokens.
    # (Slicing at the token level is more reliable than trimming the decoded
    # string, since decoding does not always round-trip the prompt exactly.)
    prompt_length = inputs["input_ids"].shape[1]
    generated_text = tokenizer.decode(
        output_sequences[0][prompt_length:], skip_special_tokens=True
    ).strip()

    logger.info("Generation complete.")
    return GenerationResponse(generated_text=generated_text)


# --- Uvicorn Runner (for local testing) ---
if __name__ == "__main__":
    import uvicorn
    logger.info("Starting API locally via Uvicorn...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
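

# --- Example client call (illustrative sketch; assumes the server is reachable at localhost:8000) ---
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/generate",
#       json={"prompt": "Explain what a tokenizer does.", "max_new_tokens": 50},
#   )
#   resp.raise_for_status()
#   print(resp.json()["generated_text"])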