"""Main FastAPI application for Hugging Face inference API."""
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from .config import get_settings
from .inference import InferenceEngine
from .models import HealthResponse, InferenceRequest, InferenceResponse
# Configure root logging once at import time: timestamp, logger name,
# level, and message for every record.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Settings and the inference engine are created at import time so the route
# handlers below can close over them; the model weights themselves are only
# loaded later, in the lifespan handler.
settings = get_settings()
engine = InferenceEngine(settings)
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Handle application startup and shutdown.

    Loads the model eagerly at startup so the first request does not pay
    the load cost; everything after ``yield`` runs at shutdown.
    """
    logger.info("Starting inference API...")
    engine.load_model()
    yield
    logger.info("Shutting down inference API...")
    # NOTE(review): no explicit model unload/cleanup here — presumably the
    # engine's resources are released at process exit; confirm whether GPU
    # memory needs to be freed explicitly on shutdown.
# FastAPI application instance; ``lifespan`` wires the model load/unload
# hooks above into the server's startup/shutdown events.
app = FastAPI(
    title="Hugging Face Inference API",
    description="REST API for Hugging Face model inference",
    version="1.0.0",
    lifespan=lifespan,
)
@app.get("/health", response_model=HealthResponse)
async def health_check() -> HealthResponse:
    """Report API liveness and whether the model is ready to serve.

    ``model_name`` is only populated once the model has actually been
    loaded by the lifespan handler.
    """
    loaded = engine.model_loaded
    name = settings.model_name if loaded else None
    return HealthResponse(
        status="ok",
        model_loaded=loaded,
        model_name=name,
    )
@app.post("/predict", response_model=InferenceResponse)
async def predict(request: InferenceRequest) -> InferenceResponse:
    """Run inference on the provided input(s).

    Raises:
        HTTPException: 503 if the model has not been loaded yet; 500 if
            the underlying engine raises during inference.
    """
    if not engine.model_loaded:
        raise HTTPException(status_code=503, detail="Model not loaded")
    try:
        # Keep the try body minimal: only the call that can fail.
        predictions = engine.predict(request.inputs, request.parameters)
    except Exception as e:
        logger.exception("Inference failed")
        # Chain the original error (``from e``) so tracebacks and error
        # middleware can see the root cause, not just the HTTPException.
        raise HTTPException(status_code=500, detail=str(e)) from e
    return InferenceResponse(
        predictions=predictions,
        model_name=settings.model_name,
    )
@app.get("/")
async def root():
    """Root endpoint returning basic API metadata and a docs pointer."""
    info = {
        "name": "Hugging Face Inference API",
        "version": "1.0.0",
    }
    # Runtime configuration comes from the module-level settings object.
    info["model"] = settings.model_name
    info["task"] = settings.task
    info["docs"] = "/docs"
    return info
if __name__ == "__main__":
    # Development entry point; in production, launch via an ASGI server
    # command (e.g. ``uvicorn app.main:app``) instead of running this file.
    import uvicorn
    uvicorn.run(
        "app.main:app",
        host=settings.host,
        port=settings.port,
        reload=True,  # auto-reload is for local development only
    )
|