# inference.py
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
from pathlib import Path
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Resolve the checkpoint path relative to this file so the script works
# regardless of the current working directory.
BASE_DIR = Path(__file__).resolve().parent
MODEL_PATH = BASE_DIR / "emotion_recognition_model.pth"
# Load the checkpoint onto the chosen device. Expected to be a dict with
# keys "classes" (label list) and "model_state" (state_dict) — see reads below.
# NOTE(review): torch.load unpickles arbitrary objects; only load trusted
# checkpoints (or pass weights_only=True on recent PyTorch) — confirm source.
checkpoint = torch.load(MODEL_PATH, map_location=device)
classes = checkpoint["classes"]
# Rebuild the architecture, then restore trained weights.
# NOTE(review): `pretrained=False` is deprecated in newer torchvision in
# favor of `weights=None`; harmless here since weights are overwritten anyway.
model = models.mobilenet_v2(pretrained=False)
# Replace the final classifier layer: 1280 is MobileNetV2's last feature
# width; output size matches the number of emotion classes.
model.classifier[1] = nn.Linear(1280, len(classes))
model.load_state_dict(checkpoint["model_state"])
model.to(device)
model.eval()  # inference mode: fixes batchnorm/dropout behavior
# Preprocessing pipeline applied to every input image before inference.
transform = transforms.Compose([
# 224x224 is the standard MobileNetV2 input resolution.
transforms.Resize((224, 224)),
# PIL image -> float tensor in [0, 1], shape (C, H, W).
transforms.ToTensor(),
# Standard ImageNet channel statistics; assumes a 3-channel RGB input.
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
@torch.no_grad()
def predict(pil_image: Image.Image) -> dict:
    """Classify the emotion shown in a single image.

    Args:
        pil_image: Input image in any PIL mode; it is converted to RGB
            so grayscale ("L") or "RGBA" inputs don't break the
            3-channel Normalize step or MobileNetV2's expected input.

    Returns:
        dict with keys:
            "emotion": predicted class label (str, from ``classes``),
            "confidence": softmax probability of that class (float).
    """
    # Force 3 channels before the transform — Normalize uses 3-element
    # mean/std and the network expects an RGB tensor.
    x = transform(pil_image.convert("RGB")).unsqueeze(0).to(device)
    logits = model(x)
    # Softmax over the class dimension of the (1, num_classes) logits.
    probs = torch.softmax(logits, dim=1)
    conf, idx = probs.max(dim=1)
    return {
        "emotion": classes[idx.item()],
        "confidence": conf.item()
    }
|