# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face Space
# page-status residue from a web scrape, not source code; preserved here as a comment.
| import gradio as gr | |
| import torch | |
| import torch.nn as nn | |
| from torchvision import transforms | |
| from torchvision.models import swin_t | |
| from PIL import Image | |
# Model definition
class MMIM(nn.Module):
    """Swin-Tiny feature extractor followed by a lightweight MLP classification head."""

    def __init__(self, num_classes=36):
        super(MMIM, self).__init__()
        # Pretrained Swin-Tiny; its built-in ImageNet head is replaced with an
        # identity so the backbone emits raw 768-dim features.
        trunk = swin_t(weights='IMAGENET1K_V1')
        trunk.head = nn.Identity()
        self.backbone = trunk
        # Small MLP head: 768 -> 512 -> num_classes, dropout for regularization.
        head_layers = [
            nn.Linear(768, 512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, num_classes),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, inputs):
        """Return raw class logits for a batch of images."""
        return self.classifier(self.backbone(inputs))
# Load model onto GPU if available, CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MMIM(num_classes=36)

# Load only weights whose names AND shapes match the current model
# (skips e.g. a classifier trained with a different number of classes).
checkpoint = torch.load("MMIM_best.pth", map_location=device)
# Hoisted: state_dict() rebuilds the whole parameter dict on every call,
# so grab it once instead of twice per checkpoint entry.
model_state = model.state_dict()
filtered_checkpoint = {
    k: v for k, v in checkpoint.items()
    if k in model_state and model_state[k].shape == v.shape
}
# strict=False tolerates the keys we deliberately filtered out.
model.load_state_dict(filtered_checkpoint, strict=False)
model.to(device)
model.eval()  # inference mode: disables dropout, freezes batch-norm stats
# Class names list, ordered to match the training folder order (index -> label).
# NOTE(review): appears to combine several weed datasets; "Negative" is the
# non-weed class checked for in predict() — confirm order against training data.
class_names = [
    "Chinee apple", "Lantana", "Negative", "Parkinsonia", "Parthenium",
    "Prickly acacia", "Rubber vine", "Siam weed", "Snake weed",
    "Black grass", "Charlock", "Cleavers", "Common Chickweed", "Common Wheat",
    "Fat Hen", "Loose Silky-bent", "Maize", "Scentless Mayweed",
    "Shepherds purse", "Small-flowered Cranesbill", "Sugar beet",
    "Carpetweeds", "Crabgrass", "Eclipta", "Goosegrass", "Morningglory",
    "Nutsedge", "PalmerAmaranth", "Pricky Sida", "Purslane", "Ragweed",
    "Sicklepod", "SpottedSpurge", "SpurredAnoda", "Swinecress", "Waterhemp"
]
# Image preprocessing: resize to the 224x224 input size used by the Swin-T
# backbone, then convert to a [0, 1] float tensor.
# NOTE(review): no ImageNet mean/std normalization is applied here — confirm
# the model was trained with the same (unnormalized) pipeline.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])
# Prediction function
def predict(img):
    """Classify an uploaded image as one of the 36 known classes.

    Args:
        img: PIL image supplied by Gradio, or None when the user submits
            without uploading anything.

    Returns:
        A human-readable prediction string including the confidence percentage.
    """
    # Guard: Gradio passes None for a cleared/empty image input; without this
    # the .convert() call below raises AttributeError.
    if img is None:
        return "⚠️ Please upload an image first."
    img = img.convert('RGB')  # normalize grayscale/RGBA uploads to 3 channels
    img_tensor = transform(img).unsqueeze(0).to(device)  # add batch dimension
    with torch.no_grad():
        outputs = model(img_tensor)
        probs = torch.softmax(outputs, dim=1)
        conf, pred = torch.max(probs, 1)
    predicted_class = class_names[pred.item()]
    confidence = conf.item() * 100
    # "Negative" is the dedicated non-weed class.
    if predicted_class.lower() == "negative":
        return f"⚠️ This image is predicted as Negative.\nConfidence: {confidence:.2f}%"
    return f"✅ Predicted as a weed with class-{predicted_class}\nConfidence: {confidence:.2f}%"
# Gradio Interface: single image in, prediction text out.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),  # hand predict() a PIL image directly
    outputs="text",
    title="Weed Image Classifier",
    description="Upload a weed image to predict its class. If the model detects a non-weed image, it will return 'Negative'."
)
# Starts the local web server (blocking call).
interface.launch()