ekting committed on
Commit
9561db9
·
verified ·
1 Parent(s): d8a569d

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +80 -0
  2. final_tuned_plant_model.pth +3 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import torch.nn as nn
4
+ from torchvision import models, transforms
5
+ import numpy as np
6
+ from PIL import Image
7
+ from pytorch_grad_cam import GradCAM
8
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
9
+ from pytorch_grad_cam.utils.image import show_cam_on_image
10
+
11
+ # --- 1. CONFIGURATION ---
12
+ # Replace these with your actual class names in order
13
+ CLASS_NAMES = ["Class_0", "Class_1", "Class_2"] # Update this list!
14
+
15
+ def load_model():
16
+ # Must match the architecture from your Colab script
17
+ model = models.mobilenet_v2(weights=None)
18
+
19
+ # Matching your 'model.classifier[1]' structure from the script
20
+ num_ftrs = model.last_channel
21
+ model.classifier = nn.Sequential(
22
+ nn.Dropout(p=0.2),
23
+ nn.Linear(num_ftrs, len(CLASS_NAMES))
24
+ )
25
+
26
+ # Load the specific weights you saved
27
+ model.load_state_dict(torch.load("final_tuned_plant_model.pth", map_location=torch.device('cpu')))
28
+ model.eval()
29
+ return model
30
+
31
+ model = load_model()
32
+
33
+ # --- 2. PREPROCESSING ---
34
+ transform = transforms.Compose([
35
+ transforms.Resize((224, 224)),
36
+ transforms.ToTensor(),
37
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
38
+ ])
39
+
40
+ def analyze_plant(img):
41
+ if img is None:
42
+ return None, None
43
+
44
+ # Prepare Image
45
+ input_tensor = transform(img).unsqueeze(0)
46
+
47
+ # 1. Get Prediction
48
+ with torch.no_grad():
49
+ outputs = model(input_tensor)
50
+ probabilities = torch.nn.functional.softmax(outputs[0], dim=0)
51
+ confidences = {CLASS_NAMES[i]: float(probabilities[i]) for i in range(len(CLASS_NAMES))}
52
+
53
+ # 2. Generate Grad-CAM
54
+ # For MobileNetV2, the last conv layer is model.features[-1]
55
+ target_layers = [model.features[-1]]
56
+ cam = GradCAM(model=model, target_layers=target_layers)
57
+
58
+ targets = [ClassifierOutputTarget(np.argmax(probabilities.numpy()))]
59
+ grayscale_cam = cam(input_tensor=input_tensor, targets=targets)[0, :]
60
+
61
+ # Create Visual Overlay
62
+ rgb_img = np.array(img.resize((224, 224))) / 255.0
63
+ cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
64
+
65
+ return confidences, cam_image
66
+
67
+ # --- 3. GRADIO INTERFACE ---
68
+ demo = gr.Interface(
69
+ fn=analyze_plant,
70
+ inputs=gr.Image(type="pil"),
71
+ outputs=[
72
+ gr.Label(num_top_classes=3, label="Prediction"),
73
+ gr.Image(label="Feature Focus (Grad-CAM)")
74
+ ],
75
+ title="TEK_1371068G: Plant Disease Diagnostic System",
76
+ description="Project X: Upload a leaf image to identify the disease and visualize the model's focus area."
77
+ )
78
+
79
+ if __name__ == "__main__":
80
+ demo.launch()
final_tuned_plant_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd92eaa200a4bd475f254178074b4102d57bda580fbb1b99f92f11a8e1ff6c68
3
+ size 10472295
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ gradio
4
+ pytorch-grad-cam
5
+ opencv-python-headless
6
+ numpy
7
+ pillow