# Source: Digambar29 — "Production stage 1" (commit 87cc891)
# inference.py
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
from pathlib import Path
# Compute device: prefer a CUDA GPU when one is available.
_cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if _cuda_available else "cpu")

# Resolve the checkpoint path relative to this file so inference works
# no matter what the current working directory is.
BASE_DIR = Path(__file__).resolve().parent
MODEL_PATH = BASE_DIR / "emotion_recognition_model.pth"
# Load the training checkpoint (expects keys "classes" and "model_state").
# NOTE(review): torch.load unpickles the file — only load checkpoints from a
# trusted source. Consider weights_only=True once the checkpoint is confirmed
# to contain only tensors / primitive containers.
checkpoint = torch.load(MODEL_PATH, map_location=device)
classes = checkpoint["classes"]

# Recreate the architecture used at training time: MobileNetV2 with its
# final classifier layer resized to the number of emotion classes.
# `weights=None` is the current torchvision API; the old `pretrained=False`
# kwarg is deprecated (we load our own weights right after anyway).
model = models.mobilenet_v2(weights=None)
model.classifier[1] = nn.Linear(1280, len(classes))
model.load_state_dict(checkpoint["model_state"])
model.to(device)
model.eval()  # disable dropout/batchnorm updates for inference
# Preprocessing pipeline: MobileNetV2 expects 224x224 tensors normalized
# with the ImageNet statistics used during its original training.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])
@torch.no_grad()
def predict(pil_image: Image.Image) -> dict:
    """Classify the emotion shown in a single image.

    Args:
        pil_image: input image in any PIL mode; it is converted to RGB so
            grayscale ("L") or RGBA inputs do not break the 3-channel
            normalization in ``transform``.

    Returns:
        dict with:
            "emotion": predicted class name (entry of ``classes``),
            "confidence": softmax probability of that class (0..1 float).
    """
    # Force 3 channels: Normalize in `transform` uses 3-element mean/std.
    x = transform(pil_image.convert("RGB")).unsqueeze(0).to(device)
    logits = model(x)
    probs = torch.softmax(logits, dim=1)
    conf, idx = probs.max(dim=1)
    return {
        "emotion": classes[idx.item()],
        "confidence": conf.item(),
    }