"""Food Recognition API (food-api/app.py).

FastAPI service that classifies food images using a fine-tuned
HuggingFace image-classification model.
Model switched to the custom model v29-full (commit b42ff5c,
author: Ajay Singh Rathore).
"""
from fastapi import FastAPI, UploadFile, File
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import io
import logging
# Set up logging
# Module-level logger keyed to this module's name (stdlib convention).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# FastAPI application instance; routes are registered below via decorators.
app = FastAPI()
# ✅ working model on HuggingFace
# Hugging Face Hub id of the fine-tuned food classifier.
MODEL_NAME = "Zodex/my-final-food-model-v29-full"
# Load the processor and model once at import time so every request reuses
# them; a failed download/load aborts startup (re-raise) rather than leaving
# the API up with no model.
try:
logger.info(f"Loading model '{MODEL_NAME}'...")
processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
model = AutoModelForImageClassification.from_pretrained(MODEL_NAME)
model.eval() # ✅ just to be safe — disables dropout/batchnorm training mode
logger.info("Model and processor loaded successfully!")
except Exception as e:
logger.error(f"Failed to load model: {str(e)}")
raise
@app.get("/")
async def read_root():
    """Health-check endpoint confirming the service is up."""
    status_message = "Food Recognition API is running."
    return {"message": status_message}
@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """Classify an uploaded food image with the module-level model.

    Args:
        file: Uploaded image in any format PIL can decode.

    Returns:
        On success: {"success": True, "predictions": [...]} where each
        prediction has "food_name" (title-cased label) and "confidence"
        (softmax probability rounded to 4 places), best-first, up to 3.
        On any failure: {"success": False, "error": <message>} — errors are
        reported in-band rather than via HTTP status, matching existing
        callers' expectations.
    """
    try:
        contents = await file.read()
        image = Image.open(io.BytesIO(contents)).convert("RGB")

        # Preprocess into model-ready tensors.
        inputs = processor(images=image, return_tensors="pt")

        # Inference without gradient tracking.
        with torch.no_grad():
            logits = model(**inputs).logits
        probabilities = torch.nn.functional.softmax(logits, dim=-1)

        # Clamp k so models with fewer than 3 labels don't crash topk.
        top_k = min(3, probabilities.shape[-1])
        # topk over probabilities yields the confidence scores directly
        # (same ordering as logits, since softmax is monotonic).
        scores, indices = torch.topk(probabilities, top_k)

        results = [
            {
                "food_name": model.config.id2label[idx.item()].replace("_", " ").title(),
                "confidence": round(float(score.item()), 4),
            }
            for score, idx in zip(scores[0], indices[0])
        ]
        return {"success": True, "predictions": results}
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception(f"Error processing image: {str(e)}")
        return {"success": False, "error": str(e)}