Sathwik P committed on
Commit
829aece
Β·
1 Parent(s): 103cf1d

Initial deployment: Add Gradio app & model

Browse files
app.py ADDED
@@ -0,0 +1,694 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import json
4
+ from PIL import Image
5
+ from torchvision import transforms
6
+ import time
7
+ import pandas as pd
8
+ from pathlib import Path
9
+ import io
10
+ import base64
11
+ from reportlab.lib.pagesizes import letter, A4
12
+ from reportlab.lib import colors
13
+ from reportlab.lib.units import inch
14
+ from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer, PageBreak, Image as RLImage
15
+ from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
16
+ from reportlab.lib.enums import TA_CENTER, TA_LEFT
17
+ from datetime import datetime
18
+
19
# Startup banner: confirm imports succeeded and the UI build is beginning.
for _banner in ("βœ… Packages installed!\n", "πŸš€ Creating Gradio Interface...\n"):
    print(_banner)
22
# ==================== LOAD MODEL & METADATA ====================
class BusClassifierInference:
    """Wraps the trained bus-component classifier for single and batch inference."""

    def __init__(self, model_path='deployment/bus_classifier_traced.pt',
                 metadata_path='deployment/model_metadata.json'):
        """Load training metadata, the model, and the preprocessing transform.

        Prefers the TorchScript export; falls back to rebuilding the
        EfficientNet-B0 architecture and loading a plain state-dict checkpoint.

        Args:
            model_path: Path to the TorchScript export of the classifier.
            metadata_path: JSON file with class names, input image size and
                normalization statistics recorded at training time.
        """
        with open(metadata_path, 'r') as f:
            self.metadata = json.load(f)

        self.class_names = self.metadata['class_names']
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        print(f"πŸ”§ Loading model on {self.device.upper()}...")

        try:
            self.model = torch.jit.load(model_path, map_location=self.device)
            print(f"βœ… TorchScript model loaded from {model_path}")
        except (FileNotFoundError, ValueError, RuntimeError):
            # torch.jit.load raises ValueError for a missing file and
            # RuntimeError for a non-TorchScript archive; catch only those
            # instead of a bare except that would hide unrelated bugs.
            print(f"⚠️ TorchScript not found, loading PyTorch checkpoint...")
            from torchvision import models

            checkpoint = torch.load('deployment/bus_classifier.pth', map_location=self.device)

            # Recreate the architecture and resize the head to the trained classes.
            self.model = models.efficientnet_b0(weights=None)
            num_features = self.model.classifier[1].in_features
            self.model.classifier[1] = torch.nn.Linear(num_features, len(self.class_names))

            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.model = self.model.to(self.device)
            print(f"βœ… PyTorch checkpoint loaded")

        self.model.eval()

        # Same preprocessing as training: resize, tensorize, normalize with
        # the statistics stored in the metadata file.
        self.transform = transforms.Compose([
            transforms.Resize((self.metadata['image_size'], self.metadata['image_size'])),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=self.metadata['normalization']['mean'],
                std=self.metadata['normalization']['std']
            )
        ])

        print(f"βœ… Model ready for inference!")
        print(f"πŸ“Š Classes: {', '.join(self.class_names)}\n")

    def predict_single(self, image):
        """Classify one image.

        Args:
            image: A PIL image, a path (str or Path), or an array-like frame
                convertible via Image.fromarray.

        Returns:
            dict with keys 'predicted_class', 'confidence',
            'all_probabilities' (class -> prob, sorted descending) and
            'inference_time_ms'.
        """
        # perf_counter is monotonic and intended for measuring durations,
        # unlike time.time() which can jump with wall-clock adjustments.
        start_time = time.perf_counter()

        if isinstance(image, (str, Path)):
            image = Image.open(image).convert('RGB')
        elif not isinstance(image, Image.Image):
            image = Image.fromarray(image).convert('RGB')

        input_tensor = self.transform(image).unsqueeze(0).to(self.device)

        with torch.no_grad():
            logits = self.model(input_tensor)
            probs = torch.softmax(logits, dim=1)
            pred_class_idx = torch.argmax(probs, dim=1).item()
            confidence = probs[0, pred_class_idx].item()

        inference_time = time.perf_counter() - start_time

        # Per-class probabilities, highest first.
        all_probs = {
            name: float(probs[0, i].item())
            for i, name in enumerate(self.class_names)
        }
        sorted_probs = dict(sorted(all_probs.items(), key=lambda x: x[1], reverse=True))

        return {
            'predicted_class': self.class_names[pred_class_idx],
            'confidence': confidence,
            'all_probabilities': sorted_probs,
            'inference_time_ms': inference_time * 1000
        }

    def predict_batch(self, images):
        """Classify a sequence of images.

        Args:
            images: Iterable of inputs accepted by predict_single.

        Returns:
            (results, total_time): list of per-image result dicts, each with
            a 1-based 'image_index' added, and total wall-clock seconds.
        """
        total_start = time.perf_counter()

        results = []
        for idx, image in enumerate(images, start=1):
            result = self.predict_single(image)
            result['image_index'] = idx
            results.append(result)

        total_time = time.perf_counter() - total_start
        return results, total_time
+
125
# Instantiate the classifier once at import time so every request reuses
# the already-loaded model.
_RULE = "=" * 80
print(_RULE)
predictor = BusClassifierInference()
print(_RULE)
129
+
130
# ==================== PDF GENERATION FUNCTION ====================
def generate_pdf_report(results, images, total_time):
    """Build a multi-section PDF report for one classification batch.

    Args:
        results: Per-image result dicts from predictor.predict_batch
            (keys: 'predicted_class', 'confidence', 'all_probabilities',
            'inference_time_ms', 'image_index').
        images: The uploaded images; only len(images) is used here.
        total_time: Total batch wall-clock time in seconds.

    Returns:
        The filename of the PDF written to the current working directory.
    """

    # Timestamped name; written to CWD (Gradio serves it via gr.File).
    pdf_filename = f"classification_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"

    # Platypus flowable document: elements appended to `story` render in order.
    doc = SimpleDocTemplate(pdf_filename, pagesize=letter)
    story = []
    styles = getSampleStyleSheet()

    # Custom styles (brand colors match the HTML dashboard gradient).
    title_style = ParagraphStyle(
        'CustomTitle',
        parent=styles['Heading1'],
        fontSize=24,
        textColor=colors.HexColor('#667eea'),
        spaceAfter=30,
        alignment=TA_CENTER,
        fontName='Helvetica-Bold'
    )

    heading_style = ParagraphStyle(
        'CustomHeading',
        parent=styles['Heading2'],
        fontSize=16,
        textColor=colors.HexColor('#333333'),
        spaceAfter=12,
        spaceBefore=12,
        fontName='Helvetica-Bold'
    )

    # Title
    story.append(Paragraph("🚌 Bus Component Classification Report", title_style))
    story.append(Paragraph(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", styles['Normal']))
    story.append(Spacer(1, 0.3*inch))

    # Summary Section
    story.append(Paragraph("πŸ“Š Executive Summary", heading_style))

    # NOTE(review): 'Model Accuracy' is hard-coded here; metadata reports a
    # different validation accuracy — confirm which figure is intended.
    summary_data = [
        ['Metric', 'Value'],
        ['Total Images Processed', str(len(images))],
        ['Total Processing Time', f'{total_time:.2f} seconds'],
        ['Average Time per Image', f'{total_time/len(images)*1000:.2f} ms'],
        ['Model Used', 'EfficientNet-B0'],
        ['Model Accuracy', '98.71%'],
        ['Device', predictor.device.upper()],
    ]

    summary_table = Table(summary_data, colWidths=[3*inch, 3*inch])
    summary_table.setStyle(TableStyle([
        ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#667eea')),
        ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
        ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
        ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('FONTSIZE', (0, 0), (-1, 0), 12),
        ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
        ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
        ('GRID', (0, 0), (-1, -1), 1, colors.black),
        ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'),
        ('FONTSIZE', (0, 1), (-1, -1), 10),
        ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
    ]))

    story.append(summary_table)
    story.append(Spacer(1, 0.3*inch))

    # Performance Metrics: bucket predictions by confidence band.
    story.append(Paragraph("πŸ“ˆ Performance Metrics", heading_style))

    avg_confidence = sum([r['confidence'] for r in results]) / len(results)
    high_conf = sum([1 for r in results if r['confidence'] >= 0.95])
    medium_conf = sum([1 for r in results if 0.80 <= r['confidence'] < 0.95])
    low_conf = sum([1 for r in results if r['confidence'] < 0.80])

    perf_data = [
        ['Performance Metric', 'Value', 'Percentage'],
        ['Average Confidence', f'{avg_confidence*100:.2f}%', '-'],
        ['High Confidence (β‰₯95%)', str(high_conf), f'{high_conf/len(images)*100:.1f}%'],
        ['Medium Confidence (80-95%)', str(medium_conf), f'{medium_conf/len(images)*100:.1f}%'],
        ['Low Confidence (<80%)', str(low_conf), f'{low_conf/len(images)*100:.1f}%'],
    ]

    perf_table = Table(perf_data, colWidths=[2.5*inch, 1.5*inch, 1.5*inch])
    perf_table.setStyle(TableStyle([
        ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#4CAF50')),
        ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('FONTSIZE', (0, 0), (-1, 0), 11),
        ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
        ('GRID', (0, 0), (-1, -1), 1, colors.black),
        ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
    ]))

    story.append(perf_table)
    story.append(Spacer(1, 0.3*inch))

    # Class Distribution: count predictions per class, most frequent first.
    story.append(Paragraph("πŸ“¦ Class Distribution", heading_style))

    class_counts = {}
    for result in results:
        pred = result['predicted_class']
        class_counts[pred] = class_counts.get(pred, 0) + 1

    dist_data = [['Class Name', 'Count', 'Percentage']]
    for class_name, count in sorted(class_counts.items(), key=lambda x: x[1], reverse=True):
        dist_data.append([class_name, str(count), f'{count/len(images)*100:.1f}%'])

    dist_table = Table(dist_data, colWidths=[3*inch, 1.5*inch, 1.5*inch])
    dist_table.setStyle(TableStyle([
        ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2196F3')),
        ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('FONTSIZE', (0, 0), (-1, 0), 11),
        ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
        ('GRID', (0, 0), (-1, -1), 1, colors.black),
        ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
    ]))

    story.append(dist_table)
    story.append(PageBreak())

    # Detailed Results: one row per image with the runner-up class.
    story.append(Paragraph("πŸ” Detailed Classification Results", heading_style))
    story.append(Spacer(1, 0.2*inch))

    detail_data = [['#', 'Predicted Class', 'Confidence', 'Time (ms)', '2nd Best', '2nd Conf']]

    for result in results:
        # 'all_probabilities' is sorted descending, so index 1 is the
        # runner-up. NOTE(review): assumes at least 2 classes exist.
        second_best = list(result['all_probabilities'].keys())[1]
        second_conf = list(result['all_probabilities'].values())[1]

        detail_data.append([
            str(result['image_index']),
            result['predicted_class'],
            f"{result['confidence']*100:.2f}%",
            f"{result['inference_time_ms']:.2f}",
            second_best,
            f"{second_conf*100:.2f}%"
        ])

    detail_table = Table(detail_data, colWidths=[0.5*inch, 1.8*inch, 1*inch, 0.9*inch, 1.8*inch, 1*inch])
    detail_table.setStyle(TableStyle([
        ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#764ba2')),
        ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('FONTSIZE', (0, 0), (-1, 0), 9),
        ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
        ('GRID', (0, 0), (-1, -1), 1, colors.black),
        ('FONTSIZE', (0, 1), (-1, -1), 8),
        ('ROWBACKGROUNDS', (0, 1), (-1, -1), [colors.white, colors.lightgrey]),
    ]))

    story.append(detail_table)
    story.append(Spacer(1, 0.3*inch))

    # Footer
    story.append(Spacer(1, 0.5*inch))
    footer_style = ParagraphStyle(
        'Footer',
        parent=styles['Normal'],
        fontSize=9,
        textColor=colors.grey,
        alignment=TA_CENTER
    )
    story.append(Paragraph("Bus Component Classification System v1.0 | Powered by EfficientNet-B0", footer_style))
    story.append(Paragraph("This report is auto-generated and contains AI predictions.", footer_style))

    # Render all flowables to disk.
    doc.build(story)

    print(f"βœ… PDF Report generated: {pdf_filename}")
    return pdf_filename
310
+
311
# ==================== GRADIO INTERFACE FUNCTIONS ====================

def predict_images(images):
    """Classify uploaded images and build the HTML dashboard plus PDF report.

    Args:
        images: Uploads from gr.File — a list of temp-file paths or file
            objects — or None when nothing was uploaded.

    Returns:
        (html, pdf_path): The results dashboard as an HTML string and the
        generated PDF's path, or an error banner and None on validation
        failure (no upload / more than 50 images).
    """

    if images is None or len(images) == 0:
        return "<h3 style='color: #F44336; text-align: center;'>⚠️ Please upload at least one image!</h3>", None

    if len(images) > 50:
        return f"<h3 style='color: #F44336; text-align: center;'>⚠️ Maximum 50 images allowed! You uploaded {len(images)} images.</h3>", None

    print(f"\nπŸ” Processing {len(images)} image(s)...")

    # Run inference on the whole batch.
    results, total_time = predictor.predict_batch(images)

    # Generate the downloadable PDF report.
    pdf_file = generate_pdf_report(results, images, total_time)

    # Count predictions per class for the distribution chart.
    class_counts = {}
    for result in results:
        pred = result['predicted_class']
        class_counts[pred] = class_counts.get(pred, 0) + 1

    # ==================== BUILD COMPACT GRID OUTPUT ====================
    html_output = f"""
    <div style="font-family: 'Segoe UI', Arial, sans-serif; max-width: 1400px; margin: 0 auto;">

    <!-- Summary Stats -->
    <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 12px 20px; border-radius: 8px; margin-bottom: 20px; color: white; display: flex; justify-content: space-around; align-items: center; flex-wrap: wrap; gap: 10px;">
        <div><strong>πŸ“Š Images:</strong> {len(images)}</div>
        <div><strong>⏱️ Total Time:</strong> {total_time:.2f}s</div>
        <div><strong>⚑ Avg Time:</strong> {total_time/len(images)*1000:.0f}ms</div>
        <div><strong>🎯 High Confidence:</strong> {sum([1 for r in results if r['confidence'] >= 0.95])}/{len(images)}</div>
    </div>

    <!-- Class Distribution Chart -->
    <div style="background: white; padding: 15px; border-radius: 8px; margin-bottom: 20px; border: 2px solid #667eea;">
        <h3 style="margin: 0 0 15px 0; color: #333; font-size: 18px;">πŸ“¦ Class Distribution</h3>
        <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 12px;">
    """

    # Add one bar per class, most frequent first.
    for class_name, count in sorted(class_counts.items(), key=lambda x: x[1], reverse=True):
        percentage = (count / len(images)) * 100
        html_output += f"""
        <div style="background: #f5f5f5; padding: 12px; border-radius: 6px; border-left: 4px solid #667eea;">
            <div style="display: flex; justify-content: space-between; margin-bottom: 6px;">
                <strong style="color: #333; font-size: 13px;">{class_name}</strong>
                <span style="color: #667eea; font-weight: bold; font-size: 13px;">{count}</span>
            </div>
            <div style="background: #e0e0e0; height: 8px; border-radius: 4px; overflow: hidden;">
                <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); width: {percentage}%; height: 100%;"></div>
            </div>
            <div style="text-align: right; margin-top: 4px; color: #666; font-size: 11px;">{percentage:.1f}%</div>
        </div>
        """

    html_output += """
        </div>
    </div>

    <!-- Results Grid (4 per row) -->
    <h3 style="margin: 20px 0 15px 0; color: #333; font-size: 18px;">πŸ” Detailed Results</h3>
    <div style="display: grid; grid-template-columns: repeat(auto-fill, minmax(280px, 1fr)); gap: 15px;">
    """

    # One card per image.
    for idx, result in enumerate(results):
        pred_class = result['predicted_class']
        confidence = result['confidence']
        inf_time = result['inference_time_ms']

        # Card color encodes the confidence band.
        if confidence >= 0.95:
            border_color = "#4CAF50"
            badge_color = "#4CAF50"
        elif confidence >= 0.80:
            border_color = "#FF9800"
            badge_color = "#FF9800"
        else:
            border_color = "#F44336"
            badge_color = "#F44336"

        # Re-encode every upload as JPEG before embedding, so the
        # "data:image/jpeg" URI below is correct even for PNG (or other
        # format) uploads; previously raw file bytes were embedded with a
        # hard-coded JPEG MIME type. convert('RGB') also handles RGBA input,
        # which JPEG cannot store.
        img = images[idx]
        img_source = img if isinstance(img, str) else getattr(img, 'name', img)
        img_pil = Image.open(img_source).convert('RGB')
        buffer = io.BytesIO()
        img_pil.save(buffer, format='JPEG')
        img_data = buffer.getvalue()

        img_base64 = base64.b64encode(img_data).decode()

        html_output += f"""
        <div style="border: 3px solid {border_color}; border-radius: 10px; overflow: hidden; background: white; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
            <!-- Image -->
            <div style="position: relative;">
                <img src="data:image/jpeg;base64,{img_base64}"
                     style="width: 100%; height: 200px; object-fit: cover;"
                     alt="Image {idx+1}">
                <div style="position: absolute; top: 8px; left: 8px; background: rgba(0,0,0,0.7); color: white; padding: 4px 10px; border-radius: 5px; font-size: 12px; font-weight: bold;">
                    #{idx+1}
                </div>
            </div>

            <!-- Prediction Info -->
            <div style="padding: 12px;">
                <div style="background: {badge_color}; color: white; padding: 8px 12px; border-radius: 6px; margin-bottom: 8px; text-align: center;">
                    <div style="font-size: 14px; font-weight: bold; margin-bottom: 2px;">{pred_class}</div>
                    <div style="font-size: 18px; font-weight: bold;">{confidence*100:.1f}%</div>
                </div>

                <div style="font-size: 11px; color: #666; text-align: center;">
                    ⏱️ {inf_time:.1f}ms
                </div>
            </div>
        </div>
        """

    html_output += """
    </div>
    </div>
    """

    print(f"βœ… Complete! Processed {len(images)} images in {total_time:.2f}s\n")

    return html_output, pdf_file
443
+
444
# ==================== CREATE MINIMAL GRADIO INTERFACE ====================

# CSS injected into the Gradio page: constrains the container width, styles
# the upload control (gradient button with hover lift), and themes the
# collapsible <details> elements used for the uploaded-file list.
custom_css = """
.gradio-container {
    max-width: 1200px !important;
    margin: auto !important;
}

/* Upload button styling */
.upload-button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    color: white !important;
    font-size: 16px !important;
    font-weight: bold !important;
    padding: 25px 40px !important;
    border-radius: 12px !important;
    border: 3px dashed rgba(255, 255, 255, 0.5) !important;
    cursor: pointer !important;
    transition: all 0.3s ease !important;
}

.upload-button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 8px 20px rgba(102, 126, 234, 0.4) !important;
    border-color: white !important;
}

details summary {
    cursor: pointer;
    padding: 10px 15px;
    background: #f0f0f0;
    border-radius: 6px;
    font-weight: bold;
    color: #333;
    border: 1px solid #ddd;
    user-select: none;
}

details[open] summary {
    background: #667eea;
    color: white;
    border-color: #667eea;
}

details {
    margin-bottom: 15px;
}

details div {
    padding: 10px 15px;
    background: white;
    border: 1px solid #ddd;
    border-top: none;
    border-radius: 0 0 6px 6px;
    max-height: 200px;
    overflow-y: auto;
}
"""
502
+
503
# Build the Gradio UI. All components and event wiring live inside this block.
with gr.Blocks(title="🚌 Bus Classifier", css=custom_css) as demo:

    # Header banner
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 12px; margin-bottom: 20px; box-shadow: 0 4px 15px rgba(102,126,234,0.4);">
        <h1 style="color: white; font-size: 32px; margin: 0; font-weight: bold;">🚌 Bus Component Classifier</h1>
        <p style="color: white; font-size: 15px; margin: 8px 0 0 0; opacity: 0.95;">EfficientNet-B0 | Accuracy: 98.71% | Real-time Classification</p>
    </div>
    """)

    # Collapsible system-information panel (model, device, supported classes).
    with gr.Accordion("πŸ“‹ System Information", open=False):
        gr.HTML(f"""
        <div style="padding: 15px; background: white; border-radius: 8px; border: 2px solid #667eea;">
            <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); gap: 15px; margin-bottom: 15px;">
                <div style="background: #f0f4ff; padding: 12px; border-radius: 6px; border-left: 4px solid #667eea;">
                    <strong style="color: #333; font-size: 14px;">Model:</strong>
                    <span style="color: #667eea; font-weight: bold; font-size: 14px;">EfficientNet-B0</span>
                </div>
                <div style="background: #f0f4ff; padding: 12px; border-radius: 6px; border-left: 4px solid #667eea;">
                    <strong style="color: #333; font-size: 14px;">Classes:</strong>
                    <span style="color: #667eea; font-weight: bold; font-size: 14px;">{len(predictor.class_names)}</span>
                </div>
                <div style="background: #f0f4ff; padding: 12px; border-radius: 6px; border-left: 4px solid #4CAF50;">
                    <strong style="color: #333; font-size: 14px;">Accuracy:</strong>
                    <span style="color: #4CAF50; font-weight: bold; font-size: 14px;">98.71%</span>
                </div>
                <div style="background: #f0f4ff; padding: 12px; border-radius: 6px; border-left: 4px solid #FF9800;">
                    <strong style="color: #333; font-size: 14px;">Device:</strong>
                    <span style="color: #FF9800; font-weight: bold; font-size: 14px;">{predictor.device.upper()}</span>
                </div>
                <div style="background: #f0f4ff; padding: 12px; border-radius: 6px; border-left: 4px solid #2196F3;">
                    <strong style="color: #333; font-size: 14px;">Max Images:</strong>
                    <span style="color: #2196F3; font-weight: bold; font-size: 14px;">50 per batch</span>
                </div>
            </div>

            <div style="padding: 15px; background: #f9f9f9; border-radius: 6px; border: 2px solid #ddd;">
                <div style="margin-bottom: 8px;">
                    <strong style="color: #333; font-size: 15px;">πŸ“¦ Supported Classes:</strong>
                </div>
                <div style="display: flex; flex-wrap: wrap; gap: 8px;">
                    {' '.join([f'<span style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 6px 12px; border-radius: 20px; font-size: 13px; font-weight: bold; display: inline-block;">{cls}</span>' for cls in predictor.class_names])}
                </div>
            </div>
        </div>
        """)

    # Upload section heading
    gr.HTML("""
    <div style="margin: 20px 0 15px 0;">
        <h3 style="color: #333; font-size: 20px; margin: 0; font-weight: bold;">πŸ“€ Upload Images</h3>
        <p style="color: #666; font-size: 14px; margin: 5px 0 0 0;">Click the button below to select images (JPG, PNG | Max: 50 images)</p>
    </div>
    """)

    with gr.Row():
        with gr.Column():
            image_input = gr.File(
                file_count="multiple",
                file_types=["image"],
                label="",
                show_label=False,
                elem_classes=["upload-button"]
            )

    # Live file count and collapsible list of selected uploads.
    file_list_html = gr.HTML()

    def update_file_list(files):
        """Render the selected-files panel: first 5 names visible, the rest
        in a collapsible <details> section. Returns "" when nothing is
        selected."""
        if not files or len(files) == 0:
            return ""

        file_count = len(files)

        # First 5 files are always shown.
        visible_files = files[:5] if file_count > 5 else files

        html = f"""
        <div style="background: #f5f5f5; padding: 15px; border-radius: 8px; margin: 10px 0; border: 2px solid #ddd;">
            <div style="font-weight: bold; color: #333; margin-bottom: 10px; font-size: 16px;">
                πŸ“ {file_count} image{'s' if file_count != 1 else ''} selected
            </div>
        """

        for idx, file in enumerate(visible_files):
            # gr.File entries expose the temp path via .name; fall back to
            # parsing the string form. Display only the basename — the
            # computed name was previously never used in the output.
            filename = file.name if hasattr(file, 'name') else str(file).split('/')[-1]
            html += f"""
            <div style="background: white; padding: 8px 12px; margin: 5px 0; border-radius: 5px; border-left: 3px solid #667eea; font-size: 13px; color: #333;">
                {idx + 1}. {Path(filename).name}
            </div>
            """

        # Remaining files go into a collapsible section.
        if file_count > 5:
            html += f"""
            <details style="margin-top: 10px;">
                <summary style="cursor: pointer; padding: 8px 12px; background: #667eea; color: white; border-radius: 5px; font-size: 14px; font-weight: bold;">
                    βž• Show {file_count - 5} more files
                </summary>
                <div style="max-height: 200px; overflow-y: auto; padding: 10px; background: white; margin-top: 5px; border-radius: 5px;">
            """

            for idx, file in enumerate(files[5:], start=6):
                filename = file.name if hasattr(file, 'name') else str(file).split('/')[-1]
                html += f"""
                <div style="padding: 6px 10px; margin: 3px 0; border-radius: 4px; border-left: 3px solid #764ba2; font-size: 12px; color: #333; background: #f9f9f9;">
                    {idx}. {Path(filename).name}
                </div>
                """

            html += """
                </div>
            </details>
            """

        html += "</div>"
        return html

    # Refresh the file list panel whenever the selection changes.
    image_input.change(
        fn=update_file_list,
        inputs=[image_input],
        outputs=[file_list_html]
    )

    # Action buttons
    with gr.Row():
        predict_btn = gr.Button(
            "πŸ” Classify Images",
            variant="primary",
            size="lg"
        )
        clear_btn = gr.Button(
            "πŸ—‘οΈ Clear All",
            size="lg"
        )

    # Results section
    gr.HTML("""
    <div style="margin: 25px 0 15px 0;">
        <h3 style="color: #333; font-size: 20px; margin: 0; font-weight: bold;">πŸ“Š Classification Results</h3>
    </div>
    """)

    results_output = gr.HTML()

    # PDF download section
    gr.HTML("""
    <div style="margin: 20px 0 10px 0;">
        <h3 style="color: #333; font-size: 18px; margin: 0; font-weight: bold;">πŸ“„ Download Report</h3>
    </div>
    """)

    pdf_output = gr.File(label="", show_label=False)

    # Legend explaining the confidence color coding.
    with gr.Accordion("ℹ️ How to Interpret Results", open=False):
        gr.HTML("""
        <div style="padding: 15px; background: #f9f9f9; border-radius: 6px; font-size: 13px; line-height: 1.8;">
            <div style="margin: 8px 0;"><span style="color: #4CAF50; font-weight: bold; font-size: 20px;">●</span> <strong style="color: #4CAF50;">Green (β‰₯95%):</strong> High confidence - Very reliable prediction</div>
            <div style="margin: 8px 0;"><span style="color: #FF9800; font-weight: bold; font-size: 20px;">●</span> <strong style="color: #FF9800;">Orange (80-95%):</strong> Medium confidence - Generally reliable</div>
            <div style="margin: 8px 0;"><span style="color: #F44336; font-weight: bold; font-size: 20px;">●</span> <strong style="color: #F44336;">Red (<80%):</strong> Low confidence - Manual review recommended</div>
        </div>
        """)

    # Event wiring
    predict_btn.click(
        fn=predict_images,
        inputs=[image_input],
        outputs=[results_output, pdf_output]
    )

    def clear_all():
        """Reset the uploader, results, PDF link, and file-list panel."""
        return None, None, None, ""

    clear_btn.click(
        fn=clear_all,
        inputs=[],
        outputs=[image_input, results_output, pdf_output, file_list_html]
    )
684
+
685
# ==================== LAUNCH ====================
if __name__ == "__main__":
    # Script entry point (Hugging Face Spaces runs `python app.py`): log a
    # summary banner, then start the Gradio server. The guard keeps imports
    # of this module from launching a server as a side effect.
    print("\n" + "=" * 80)
    print("πŸš€ LAUNCHING GRADIO INTERFACE (LOCAL)")
    print("=" * 80)
    print("Model: EfficientNet-B0")
    print(f"Classes: {len(predictor.class_names)}")
    print(f"Device: {predictor.device.upper()}")
    print(f"{'=' * 80}\n")

    demo.launch()
deployment/bus_classifier.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22b8b6d23d3bda108c343413664c19e4b1c887e40c4149129cf8f1bfa5fbec81
3
+ size 16353201
deployment/bus_classifier_traced.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5e6b9e2b4a14e8ba14f0546c23f294562d5769a6374fbc28608db9fdb849d7e
3
+ size 16886957
deployment/model_metadata.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "class_names": [
3
+ "Alco-Brake_Device",
4
+ "Bus_-_Front_Side",
5
+ "First_Aid_Kit",
6
+ "Hat-rack_side-1",
7
+ "ITMS_Device_Functionality"
8
+ ],
9
+ "class_to_idx": {
10
+ "Alco-Brake_Device": 0,
11
+ "Bus_-_Front_Side": 1,
12
+ "First_Aid_Kit": 2,
13
+ "Hat-rack_side-1": 3,
14
+ "ITMS_Device_Functionality": 4
15
+ },
16
+ "num_classes": 5,
17
+ "image_size": 224,
18
+ "model_name": "efficientnet_b0",
19
+ "validation_accuracy": 0.9922480583190918,
20
+ "training_samples": 1533,
21
+ "validation_samples": 387,
22
+ "normalization": {
23
+ "mean": [
24
+ 0.485,
25
+ 0.456,
26
+ 0.406
27
+ ],
28
+ "std": [
29
+ 0.229,
30
+ 0.224,
31
+ 0.225
32
+ ]
33
+ }
34
+ }
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
torch
torchvision
gradio
Pillow
reportlab
numpy
pandas