AlBaraa63 commited on
Commit
5e4b609
·
verified ·
1 Parent(s): d0b0145

Upload 10 files

Browse files
app.py CHANGED
@@ -201,11 +201,8 @@ def main():
201
  st.markdown("### ๐Ÿ“Š Waste Categories")
202
  categories = [
203
  "๐ŸŸ  General Waste (0)",
204
- "๐ŸŸก Containers (c)",
205
  "๐Ÿ”ด Garbage",
206
- "๐ŸŸฃ Garbage Bags",
207
- "๐ŸŸข Waste",
208
- "๐ŸŸค Trash"
209
  ]
210
  for cat in categories:
211
  st.markdown(f"- {cat}")
@@ -219,9 +216,26 @@ def main():
219
  st.markdown("---")
220
  st.markdown("**Developer:** AlBaraa AlOlabi (@AlBaraa63)")
221
  st.markdown("**Track:** MCP in Action (Agents)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
 
223
  # Main content
224
- tab1, tab2, tab3 = st.tabs(["๐Ÿ“ธ Image Detection", "๐Ÿ“Š About", "๐Ÿ† Hackathon"])
225
 
226
  with tab1:
227
  st.markdown("### Upload an Image for Detection")
@@ -242,7 +256,7 @@ def main():
242
 
243
  with col1:
244
  st.markdown("#### Original Image")
245
- st.image(image_rgb, use_column_width=True)
246
 
247
  # Run detection
248
  with st.spinner("๐Ÿ” Detecting garbage..."):
@@ -255,7 +269,7 @@ def main():
255
  with col2:
256
  st.markdown("#### Detection Results")
257
  annotated_rgb = cv2.cvtColor(result["image"], cv2.COLOR_BGR2RGB)
258
- st.image(annotated_rgb, use_column_width=True)
259
 
260
  # Display statistics
261
  st.markdown("---")
@@ -321,6 +335,222 @@ def main():
321
  """)
322
 
323
  with tab2:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
324
  st.markdown("## ๐ŸŽฏ About CleanEye")
325
 
326
  st.markdown("""
 
201
  st.markdown("### ๐Ÿ“Š Waste Categories")
202
  categories = [
203
  "๐ŸŸ  General Waste (0)",
 
204
  "๐Ÿ”ด Garbage",
205
+ "๐ŸŸฃ Garbage Bags"
 
 
206
  ]
207
  for cat in categories:
208
  st.markdown(f"- {cat}")
 
216
  st.markdown("---")
217
  st.markdown("**Developer:** AlBaraa AlOlabi (@AlBaraa63)")
218
  st.markdown("**Track:** MCP in Action (Agents)")
219
+ st.markdown("---")
220
+ st.markdown("### ๐Ÿงช Test Samples")
221
+ st.info("Download and try these sample images:")
222
+ sample_dir = ROOT_DIR / "test_samples"
223
+ sample_files = [
224
+ ("sample_1_bins.jpg", "Garbage and recycle bins"),
225
+ ("sample_2_bags.jpg", "Black garbage bags"),
226
+ ("sample_3_pile.jpg", "Large garbage pile"),
227
+ ("sample_4_street.jpg", "Street waste scenario"),
228
+ ("sample_5_plastic.jpg", "Plastic waste accumulation")
229
+ ]
230
+ for fname, desc in sample_files:
231
+ fpath = sample_dir / fname
232
+ if fpath.exists():
233
+ with open(fpath, "rb") as f:
234
+ btn_label = f"โฌ‡๏ธ {desc}"
235
+ st.download_button(btn_label, f.read(), file_name=fname, mime="image/jpeg")
236
 
237
  # Main content
238
+ tab1, tab2, tab3, tab4 = st.tabs(["๐Ÿ“ธ Image Detection", "๐ŸŽฅ Video Detection", "๐Ÿ“Š About", "๐Ÿ† Hackathon"])
239
 
240
  with tab1:
241
  st.markdown("### Upload an Image for Detection")
 
256
 
257
  with col1:
258
  st.markdown("#### Original Image")
259
+ st.image(image_rgb, use_container_width=True)
260
 
261
  # Run detection
262
  with st.spinner("๐Ÿ” Detecting garbage..."):
 
269
  with col2:
270
  st.markdown("#### Detection Results")
271
  annotated_rgb = cv2.cvtColor(result["image"], cv2.COLOR_BGR2RGB)
272
+ st.image(annotated_rgb, use_container_width=True)
273
 
274
  # Display statistics
275
  st.markdown("---")
 
335
  """)
336
 
337
  with tab2:
338
+ st.markdown("### Upload a Video for Detection")
339
+
340
+ uploaded_video = st.file_uploader(
341
+ "Choose a video file (MP4, MOV, AVI)",
342
+ type=["mp4", "mov", "avi"],
343
+ help="Upload a video to analyze garbage frame-by-frame"
344
+ )
345
+
346
+ if uploaded_video is not None:
347
+ # Save uploaded video temporarily
348
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
349
+ tmp_file.write(uploaded_video.getbuffer())
350
+ temp_path = Path(tmp_file.name)
351
+
352
+ # Open video
353
+ cap = cv2.VideoCapture(str(temp_path))
354
+
355
+ if not cap.isOpened():
356
+ st.error("โŒ Unable to open video file. Please try another format.")
357
+ temp_path.unlink(missing_ok=True)
358
+ return
359
+
360
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
361
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
362
+
363
+ st.info(f"๐Ÿ“น Video Info: {total_frames} frames at {fps} FPS")
364
+
365
+ # Limit frames to analyze for cloud deployment
366
+ max_frames = st.slider("Maximum frames to analyze", 10, 200, 100,
367
+ help="Analyzing fewer frames is faster")
368
+
369
+ # Analysis button
370
+ if st.button("๐ŸŽฌ Start Analysis"):
371
+ model = load_model()
372
+
373
+ progress_bar = st.progress(0)
374
+ status_text = st.empty()
375
+ video_frame_placeholder = st.empty()
376
+
377
+ frames_analyzed = 0
378
+ detections_found = 0
379
+ unique_items = set()
380
+ detection_samples = []
381
+ annotated_frames = []
382
+
383
+ # Process video frames
384
+ frame_skip = max(1, total_frames // max_frames)
385
+
386
+ # Reset video to start
387
+ cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
388
+
389
+ try:
390
+ while frames_analyzed < max_frames:
391
+ ret, frame = cap.read()
392
+ if not ret:
393
+ break
394
+
395
+ # Skip frames for efficiency
396
+ for _ in range(frame_skip - 1):
397
+ cap.read()
398
+
399
+ # Run detection
400
+ results = model(frame, conf=confidence, verbose=False)
401
+
402
+ # Annotate frame
403
+ annotated = frame.copy()
404
+ frame_detections = len(results[0].boxes)
405
+
406
+ if frame_detections > 0:
407
+ detections_found += frame_detections
408
+
409
+ # Draw detections
410
+ for box in results[0].boxes:
411
+ cls_id = int(box.cls[0])
412
+ conf_score = float(box.conf[0])
413
+ label = model.names[cls_id]
414
+ color = COLORS.get(label, (255, 255, 255))
415
+ x1, y1, x2, y2 = map(int, box.xyxy[0])
416
+
417
+ cv2.rectangle(annotated, (x1, y1), (x2, y2), color, 2)
418
+ cv2.putText(annotated, f"{label} {conf_score:.0%}",
419
+ (x1, max(25, y1 - 10)),
420
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
421
+
422
+ unique_items.add(label)
423
+
424
+ # Save sample detection frames (first 5)
425
+ if len(detection_samples) < 5:
426
+ detection_samples.append(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB))
427
+
428
+ # Show current frame being processed
429
+ if frames_analyzed % 10 == 0: # Update display every 10 frames
430
+ frame_rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
431
+ video_frame_placeholder.image(frame_rgb, caption=f"Processing frame {frames_analyzed}...",
432
+ use_container_width=True)
433
+
434
+ # Store frames for optional video output (limit to 30 for performance)
435
+ if len(annotated_frames) < 30:
436
+ annotated_frames.append(annotated)
437
+
438
+ frames_analyzed += 1
439
+ progress = frames_analyzed / max_frames
440
+ progress_bar.progress(progress)
441
+ status_text.text(f"Analyzing... {frames_analyzed}/{max_frames} frames | Detections: {detections_found}")
442
+
443
+ finally:
444
+ cap.release()
445
+ temp_path.unlink(missing_ok=True)
446
+
447
+ # Clear processing display
448
+ video_frame_placeholder.empty()
449
+
450
+ # Display results
451
+ st.markdown("---")
452
+ st.markdown("### ๐Ÿ“Š Video Analysis Complete!")
453
+
454
+ col1, col2, col3 = st.columns(3)
455
+
456
+ with col1:
457
+ st.metric("๐Ÿ“น Frames Analyzed", frames_analyzed)
458
+
459
+ with col2:
460
+ st.metric("๐Ÿ—‘๏ธ Total Detections", detections_found)
461
+
462
+ with col3:
463
+ avg = detections_found / frames_analyzed if frames_analyzed > 0 else 0
464
+ st.metric("๐Ÿ“ˆ Avg per Frame", f"{avg:.2f}")
465
+
466
+ if unique_items:
467
+ st.success(f"โœ… **Found {len(unique_items)} different types of garbage:**")
468
+ st.write(", ".join(f"๐Ÿ—‘๏ธ {item}" for item in sorted(unique_items)))
469
+
470
+ # Show sample detection frames
471
+ if detection_samples:
472
+ st.markdown("### ๐Ÿ–ผ๏ธ Sample Detection Frames")
473
+ st.markdown(f"*Showing {len(detection_samples)} frames with detections*")
474
+
475
+ # Display in rows of 3
476
+ for i in range(0, len(detection_samples), 3):
477
+ cols = st.columns(3)
478
+ for idx, img in enumerate(detection_samples[i:i+3]):
479
+ with cols[idx]:
480
+ st.image(img, caption=f"Sample {i+idx+1}", use_container_width=True)
481
+
482
+ # Create a short video clip if we have frames
483
+ if annotated_frames and len(annotated_frames) >= 10:
484
+ st.markdown("### ๐ŸŽฌ Annotated Video Sample")
485
+ st.info(f"๐Ÿ“น Showing first {len(annotated_frames)} processed frames as a video sample")
486
+
487
+ # Create temporary video file with better codec
488
+ output_video_path = Path(tempfile.gettempdir()) / f"detection_output_{hash(str(annotated_frames[0].data.tobytes()))}.mp4"
489
+
490
+ # Get frame dimensions
491
+ height, width = annotated_frames[0].shape[:2]
492
+
493
+ try:
494
+ # Try H.264 codec (better browser compatibility)
495
+ fourcc = cv2.VideoWriter_fourcc(*'avc1')
496
+ out = cv2.VideoWriter(str(output_video_path), fourcc, 10.0, (width, height))
497
+
498
+ # If that fails, fallback to mp4v
499
+ if not out.isOpened():
500
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
501
+ out = cv2.VideoWriter(str(output_video_path), fourcc, 10.0, (width, height))
502
+
503
+ for frame in annotated_frames:
504
+ out.write(frame)
505
+
506
+ out.release()
507
+
508
+ # Check if video file was created successfully
509
+ if output_video_path.exists() and output_video_path.stat().st_size > 0:
510
+ # Display video
511
+ with open(output_video_path, 'rb') as video_file:
512
+ video_bytes = video_file.read()
513
+ st.video(video_bytes)
514
+
515
+ # Cleanup
516
+ output_video_path.unlink(missing_ok=True)
517
+ else:
518
+ st.warning("โš ๏ธ Could not create video file. Showing frames as slideshow instead.")
519
+ # Show as image slideshow
520
+ st.markdown("#### ๐Ÿ“ธ Detection Frames Slideshow")
521
+ frame_placeholder = st.empty()
522
+ import time
523
+ for idx, frame in enumerate(annotated_frames):
524
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
525
+ frame_placeholder.image(frame_rgb, caption=f"Frame {idx+1}/{len(annotated_frames)}",
526
+ use_container_width=True)
527
+ time.sleep(0.1) # 10 FPS
528
+
529
+ except Exception as e:
530
+ st.warning(f"โš ๏ธ Video creation failed: {str(e)}. Showing sample frames instead.")
531
+ # Fallback: show frames in a grid
532
+ st.markdown("#### ๐Ÿ“ธ All Detection Frames")
533
+ for i in range(0, len(annotated_frames), 3):
534
+ cols = st.columns(3)
535
+ for idx, frame in enumerate(annotated_frames[i:i+3]):
536
+ with cols[idx]:
537
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
538
+ st.image(frame_rgb, caption=f"Frame {i+idx+1}", use_container_width=True)
539
+
540
+ else:
541
+ st.info("โ„น๏ธ No garbage detected in this video. Try lowering the confidence threshold.")
542
+
543
+ else:
544
+ st.info("๐Ÿ‘† Upload a video to start analyzing")
545
+ st.markdown("#### ๐Ÿ’ก Video Analysis Tips:")
546
+ st.markdown("""
547
+ - Shorter videos process faster
548
+ - Clear, stable footage works best
549
+ - Good lighting improves detection accuracy
550
+ - The system analyzes frames at regular intervals
551
+ """)
552
+
553
+ with tab4:
554
  st.markdown("## ๐ŸŽฏ About CleanEye")
555
 
556
  st.markdown("""
test_samples/README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🧪 Test Samples
2
+
3
+ Welcome! This folder contains sample images you can use to test CleanEye's garbage detection capabilities.
4
+
5
+ ## 📸 Sample Images
6
+
7
+ We've included 5 diverse test images:
8
+
9
+ 1. **sample_1_bins.jpg** - Garbage and recycle bins
10
+ 2. **sample_2_bags.jpg** - Black garbage bags
11
+ 3. **sample_3_pile.jpg** - Large pile of garbage
12
+ 4. **sample_4_street.jpg** - Street waste scenario
13
+ 5. **sample_5_plastic.jpg** - Plastic waste accumulation
14
+
15
+ ## 🚀 How to Test
16
+
17
+ ### Option 1: Web Interface (Recommended)
18
+ ```bash
19
+ python start.py
20
+ ```
21
+ Then select option 1 to launch the Streamlit dashboard, and upload any sample image.
22
+
23
+ ### Option 2: Command Line
24
+ ```bash
25
+ # Using the detect script
26
+ python code/detect_pro.py --source test_samples/sample_1_bins.jpg
27
+
28
+ # View all detections
29
+ python code/app.py
30
+ ```
31
+
32
+ ### Option 3: HuggingFace Space
33
+ Visit: https://huggingface.co/spaces/AlBaraa63/cleaneye-garbage-detection
34
+
35
+ Upload any of these samples directly in your browser!
36
+
37
+ ## 🎯 What You'll See
38
+
39
+ CleanEye will detect and classify:
40
+ - ๐Ÿ—‘๏ธ **garbage** - General garbage items
41
+ - ๐Ÿ‘œ **garbage_bag** - Plastic waste bags
42
+ - ๐Ÿ“ฆ **c** - Containers
43
+ - โ™ป๏ธ **waste** - Waste materials
44
+ - ๐Ÿšฎ **trash** - Trash items
45
+ - ๐Ÿ“Š **0** - General waste category
46
+
47
+ Each detection includes:
48
+ - Bounding box highlighting
49
+ - Confidence score
50
+ - Color-coded classification
51
+ - Real-time statistics
52
+
53
+ ## 💡 Tips
54
+
55
+ - Try uploading multiple images in sequence
56
+ - Test with different lighting conditions
57
+ - Compare detection accuracy across samples
58
+ - Use the video mode for webcam testing
59
+
60
+ ---
61
+
62
+ **Happy Testing!** 🎉
test_samples/sample_1_bins.jpg ADDED
test_samples/sample_2_bags.jpg ADDED
test_samples/sample_3_pile.jpg ADDED
test_samples/sample_4_street.jpg ADDED
test_samples/sample_5_plastic.jpg ADDED