|
|
import gradio as gr |
|
|
import torch |
|
|
from PIL import Image |
|
|
import torchvision.transforms as transforms |
|
|
from torchvision import models |
|
|
|
|
|
|
|
|
# Build a ResNet-18 backbone pre-trained on ImageNet and swap in a
# fresh 5-way linear head for the flower classes.
model = models.resnet18(weights='IMAGENET1K_V1')
model.fc = torch.nn.Linear(model.fc.in_features, 5)

# Load the checkpoint on CPU. weights_only=True restricts unpickling to
# tensors/containers, avoiding arbitrary-code execution from the file.
state_dict = torch.load('shiva_flower_classification.pth', map_location=torch.device('cpu'), weights_only=True)

# NOTE(review): the classifier-head weights are dropped here and the load
# below uses strict=False, so `model.fc` keeps its random initialization.
# If this checkpoint was saved from the fine-tuned 5-class model, this
# discards the trained head — confirm the checkpoint's fc shape.
for head_key in ('fc.weight', 'fc.bias'):
    state_dict.pop(head_key, None)

model.load_state_dict(state_dict, strict=False)

# Inference mode: fixes batch-norm statistics and disables dropout.
model.eval()
|
|
|
|
|
|
|
|
# Class labels in the index order the classifier head was trained with.
classes = 'daisy dandelion rose sunflower tulip'.split()
|
|
|
|
|
|
|
|
# Standard ImageNet preprocessing: resize to the fixed 224x224 input,
# convert to a float tensor, then normalize per channel with the
# ImageNet training-set mean and standard deviation.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(_IMAGENET_MEAN, _IMAGENET_STD),
])
|
|
|
|
|
|
|
|
def predict(image):
    """Classify a flower image into one of the five known classes.

    Args:
        image: A ``PIL.Image.Image`` as supplied by the Gradio image input.

    Returns:
        The predicted class name (one of ``classes``) as a string.
    """
    # Gradio can hand over RGBA (e.g. PNG) or grayscale images; Normalize
    # expects exactly 3 channels, so force an RGB conversion first.
    image = image.convert('RGB')

    # Preprocess and add the leading batch dimension the model expects.
    batch = transform(image).unsqueeze(0)

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        outputs = model(batch)
        # argmax over the class dimension picks the highest-scoring label.
        _, predicted = torch.max(outputs, 1)

    return classes[predicted.item()]
|
|
|
|
|
|
|
|
# Wire the classifier into a minimal Gradio UI: a PIL image goes in,
# the predicted class name comes back as plain text.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Flower Classification",
    description=(
        "Upload an image of a flower to classify it into one of the five "
        "categories: daisy, dandelion, rose, sunflower, or tulip."
    ),
)

# Launch the web server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()
|
|
|