from PIL import Image
import base64
import io

import torch
import torch.nn as nn
import torchvision.transforms as transforms
class EndpointHandler:
    """Inference handler for a grayscale 28x28 image classifier.

    Loads a PyTorch model from a state-dict checkpoint and serves
    single-image classification requests via ``__call__``.
    """

    def __init__(self, model_path: str):
        """Load the model weights and build the preprocessing pipeline.

        Args:
            model_path: path to a ``state_dict`` checkpoint loadable on CPU.
        """
        # Project-local architecture definition; imported lazily so the
        # handler module can be imported without the model file present.
        from your_model_file import Net

        self.model = Net()
        self.model.load_state_dict(torch.load(model_path, map_location="cpu"))
        self.model.eval()  # disable dropout/batchnorm updates for inference
        # Preprocessing must match the network's expected input:
        # single channel, 28x28, float tensor in [0, 1].
        self.transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
        ])

    def __call__(self, data: dict) -> dict:
        """Classify one image.

        Args:
            data: request payload; ``data["inputs"]`` holds the raw image
                bytes, or a base64-encoded string of those bytes.

        Returns:
            ``{"scores": [...]}`` — softmax probabilities over the classes.

        Raises:
            KeyError: if the payload has no ``"inputs"`` key.
        """
        image_bytes = data["inputs"]
        # Inference-endpoint payloads often arrive base64-encoded as a
        # string; accept both forms (raw bytes remain fully supported).
        if isinstance(image_bytes, str):
            image_bytes = base64.b64decode(image_bytes)
        image = Image.open(io.BytesIO(image_bytes)).convert("L")
        tensor = self.transform(image).unsqueeze(0)  # add batch dim -> (1, 1, 28, 28)
        with torch.no_grad():  # no autograd bookkeeping during inference
            logits = self.model(tensor)
        scores = torch.softmax(logits, dim=1)[0].tolist()
        return {"scores": scores}