PraneshJs committed on
Commit 32fb896 · verified · 1 Parent(s): f8ed6f1

Create app.py

Files changed (1)
  1. app.py +251 -0
app.py ADDED
@@ -0,0 +1,251 @@
# ==========================================================
# YOLOv5n Visualizer — "Inside Object Detection"
# - Uses small YOLOv5n (CPU-friendly)
# - Shows detections + early/mid/late feature maps
# - Gradio 5 compatible (theme supported)
# ==========================================================

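# Dependencies (assumed, not pinned here): gradio, torch, numpy, pillow.
# On first load, torch.hub clones ultralytics/yolov5; that repo's hub entry
# point checks (and may auto-install) its own requirements.
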
import gradio as gr
import torch
import numpy as np
from PIL import Image

# ------------------- GLOBALS -------------------

MODEL_NAME = "yolov5n"  # smallest YOLOv5 model (fast & light)
DEVICE = "cpu"

MODEL = None
FEATURE_MAPS = {}  # {layer_name: tensor(B,C,H,W)}


# ------------------- MODEL LOADING -------------------

def load_model():
    """
    Load YOLOv5n from torch.hub (ultralytics/yolov5) and
    register forward hooks to capture internal feature maps.
    """
    global MODEL, FEATURE_MAPS
    if MODEL is not None:
        return MODEL

    # Download and load YOLOv5n from GitHub (only on first run).
    # The repo 'ultralytics/yolov5' must be reachable during build/first call.
    model = torch.hub.load("ultralytics/yolov5", MODEL_NAME, pretrained=True)
    model.to(DEVICE)
    model.eval()

    FEATURE_MAPS = {}

    def make_hook(name):
        def hook(module, input, output):
            # YOLO can run on GPU or CPU, but we store CPU tensors for visualization
            with torch.no_grad():
                FEATURE_MAPS[name] = output.detach().cpu()
        return hook

    # The hub model is wrapped (AutoShape -> DetectMultiBackend -> DetectionModel),
    # so unwrap `.model` until we reach the nn.Sequential of backbone/head layers.
    layers = model
    while not isinstance(layers, torch.nn.Sequential):
        layers = layers.model

    # Register hooks on some main layers in the YOLOv5 backbone/head.
    # We choose Conv / C3 / SPPF etc. so we can show early, mid, late stages.
    for idx, m in enumerate(layers):
        cls_name = m.__class__.__name__
        if cls_name in ["Conv", "C3", "Bottleneck", "BottleneckCSP", "SPPF"]:
            m.register_forward_hook(make_hook(str(idx)))

    MODEL = model
    return MODEL

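# A quick (hypothetical) way to verify the hooks fire on your torch/yolov5
# versions (run once locally; not part of the app):
#   m = load_model()
#   _ = m(Image.new("RGB", (320, 320)))
#   print(sorted(FEATURE_MAPS.keys(), key=int))
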

# ------------------- FEATURE MAP UTILITIES -------------------

def tensor_to_heatmap(fm, out_size):
    """
    Convert a feature map tensor (C,H,W) to a grayscale heatmap PIL image.
    Steps:
    - average over channels
    - normalize to 0..1
    - upscale to out_size
    """
    if fm.ndim != 3:
        return None

    fm_np = fm.numpy().astype(np.float32)  # (C,H,W)
    # average over channels -> (H,W)
    heat = fm_np.mean(axis=0)

    if np.allclose(heat, 0):
        heat = np.zeros_like(heat)
    else:
        heat = heat - heat.min()
        maxv = heat.max()
        if maxv > 0:
            heat = heat / maxv

    heat_img = (heat * 255).astype("uint8")
    pil = Image.fromarray(heat_img, mode="L")
    pil = pil.resize(out_size, Image.NEAREST)
    return pil

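# Note: for a colored heatmap instead of grayscale, one option (assuming
# matplotlib is available) would be:
#   from matplotlib import cm
#   rgba = (cm.viridis(heat) * 255).astype("uint8")  # heat in 0..1, shape (H,W)
#   pil = Image.fromarray(rgba[..., :3]).resize(out_size, Image.NEAREST)
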

def pick_feature_maps():
    """
    After a forward pass, FEATURE_MAPS holds many layers.
    We pick up to 3 layers: early, middle, late.
    Returns: list of (name, tensor(C,H,W))
    """
    if not FEATURE_MAPS:
        return []

    # keys are layer indices as strings: "0", "1", "4", ...
    keys = sorted(FEATURE_MAPS.keys(), key=lambda x: int(x))
    fms = [FEATURE_MAPS[k][0] for k in keys]  # take batch element 0

    # pick early, mid, late
    idxs = [0, len(fms) // 2, len(fms) - 1]
    idxs = sorted(set(idxs))  # remove duplicate indices

    chosen = []
    for i in idxs:
        chosen.append((keys[i], fms[i]))
    return chosen

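# Worked example: with 11 hooked layers, idxs = [0, 5, 10] -> the first Conv,
# a mid-network block, and the last hooked block; with a single hooked layer,
# idxs collapses to [0] and only one heatmap is produced.
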

# ------------------- MAIN ANALYSIS FUNCTION -------------------

def analyze_yolo(img, conf_thres, iou_thres, simple_mode):
    """
    Run YOLO on the input image and return:
    - detection overlay image
    - early feature map heatmap
    - mid feature map heatmap
    - late feature map heatmap
    - explanation markdown
    """
    if img is None:
        return (
            None,  # det img
            None,  # early fm
            None,  # mid fm
            None,  # late fm
            "⚠️ Please upload an image first."
        )

    model = load_model()

    # Clear feature maps from the previous run
    FEATURE_MAPS.clear()

    # In Gradio, `type="pil"` gives a PIL image already
    pil = img

    # Configure thresholds
    model.conf = float(conf_thres)
    model.iou = float(iou_thres)

    with torch.no_grad():
        results = model(pil)

    # YOLOv5's .render() draws boxes and labels on the image
    rendered = results.render()[0]  # numpy array (H,W,C)
    det_img = Image.fromarray(rendered)
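    # (Optional) Detections are also available as a pandas DataFrame via the
    # YOLOv5 results API, e.g. for logging or a gr.Dataframe output:
    #   df = results.pandas().xyxy[0]  # xmin, ymin, xmax, ymax, confidence, class, name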

    # Collect feature maps from the hooks
    chosen_fms = pick_feature_maps()
    W, H = pil.size
    heatmaps = [None, None, None]  # early, mid, late

    for idx, (name, fm) in enumerate(chosen_fms):
        heatmaps[idx] = tensor_to_heatmap(fm, (W, H))

    # Build a readable explanation
    if simple_mode:
        explanation = (
            "🧒 **Simple explanation of what you see:**\n\n"
            "1. YOLO first looks at your image and finds basic patterns like edges and corners.\n"
            "2. Then it builds more complex shapes (parts of objects: wheels, faces, etc.).\n"
            "3. In the last layers, it focuses on whole objects and decides **what** and **where** they are.\n\n"
            "**In the outputs:**\n"
            "- Detections image: final boxes + labels.\n"
            "- Early heatmap: where YOLO sees low-level details.\n"
            "- Middle heatmap: where it sees object parts.\n"
            "- Late heatmap: where it focuses on full objects.\n"
        )
    else:
        explanation = (
            "🔬 **Technical explanation:**\n\n"
            "- We run `yolov5n` (the smallest YOLOv5) on CPU.\n"
            "- Forward hooks capture intermediate feature maps from several Conv/C3/SPPF blocks.\n"
            "- For each selected layer, we take the tensor `(C,H,W)`, average over channels to get a 2D\n"
            "  activation map `(H,W)`, normalize it, and upscale it to the original image size.\n"
            "- Early feature map ≈ low-level features (edges, textures).\n"
            "- Middle feature map ≈ mid-level features (parts, shapes).\n"
            "- Late feature map ≈ high-level features (object-centric regions used for detection).\n"
        )

    # Append captured layer shapes, if any
    fm_shapes_info = []
    for name, fm in chosen_fms:
        fm_shapes_info.append(f"Layer {name}: shape {tuple(fm.shape)} (C,H,W)")
    if fm_shapes_info:
        explanation += "\n**Feature map shapes captured:**\n" + "\n".join(f"- {s}" for s in fm_shapes_info)

    return det_img, heatmaps[0], heatmaps[1], heatmaps[2], explanation


# ------------------- GRADIO UI (GRADIO 5) -------------------

with gr.Blocks(
    title="YOLOv5n Visualizer — Inside Object Detection",
    theme=gr.themes.Soft()
) as demo:

    gr.Markdown("# 🧠 YOLOv5n Visualizer — See Inside Object Detection")
    gr.Markdown(
        "Upload an image and see YOLO work **step by step**:\n"
        "1. Early feature activations (edges/textures)\n"
        "2. Middle feature activations (parts/shapes)\n"
        "3. Late feature activations (object focus)\n"
        "4. Final detections (boxes & labels)\n\n"
        "Use the explanation toggle to switch between the simple and technical view."
    )

    with gr.Row():
        with gr.Column(scale=1):
            in_img = gr.Image(
                label="Step 0 — Input image",
                type="pil"
            )
            conf_slider = gr.Slider(
                0.1, 0.9, step=0.05, value=0.25,
                label="Confidence threshold"
            )
            iou_slider = gr.Slider(
                0.1, 0.9, step=0.05, value=0.45,
                label="IoU threshold (for NMS)"
            )
            simple_ck = gr.Checkbox(
                label="Explain in simple terms (for kids/elders)",
                value=True
            )
            run_btn = gr.Button("Run YOLO & Visualize", variant="primary")

        with gr.Column(scale=1):
            out_det = gr.Image(label="Step 4 — Final detections (YOLOv5n)")
            explanation_md = gr.Markdown(label="Explanation")

    gr.Markdown("### 🔍 Steps inside the network (feature maps)")

    with gr.Row():
        fm1 = gr.Image(label="Step 1 — Early layer activation (edges & textures)", interactive=False)
        fm2 = gr.Image(label="Step 2 — Middle layer activation (parts & shapes)", interactive=False)
        fm3 = gr.Image(label="Step 3 — Late layer activation (objects)", interactive=False)

    run_btn.click(
        analyze_yolo,
        inputs=[in_img, conf_slider, iou_slider, simple_ck],
        outputs=[out_det, fm1, fm2, fm3, explanation_md]
    )

demo.launch()
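
# Note (assumption): on a CPU-only Space you may want demo.queue() before
# demo.launch() so concurrent requests are serialized; inference here is
# CPU-bound and the single model instance is shared across users.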