Rohitsharma15 committed
Commit 9d003a2 · verified · 1 Parent(s): 4f6dac7

Update app.py

Files changed (1): app.py (+50, -36)
app.py CHANGED
@@ -5,74 +5,86 @@ from ultralytics import YOLO
import cv2
import gradio as gr
import numpy as np
+ import torch
from ultralytics import YOLO

- # Load the YOLOv8 model
- model = YOLO("yolov8n.pt")  # Using pre-trained YOLOv8 nano model
-
- # Object classes in YOLOv8
- CLASS_NAMES = model.names
- HUMAN_CLASS_ID = 0  # Class ID for "person" in YOLO
+ # Load YOLOv8 model (pre-trained)
+ model = YOLO("yolov8n.pt")  # Use a small YOLOv8 model for efficiency

+ # Video Inference Function
def vid_inf(vid_path, contour_thresh):
    cap = cv2.VideoCapture(vid_path)
-
-     # Get the video frames' width and height
+
+     # Get video properties
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_size = (frame_width, frame_height)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_video = "output_recorded.mp4"
+
+     # Video Writer
    out = cv2.VideoWriter(output_video, fourcc, fps, frame_size)

+     # Background Subtraction Model
    backSub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=True)
-
+
+     # Check if video opened successfully
    if not cap.isOpened():
        print("Error opening video file")
-         return
+         return None, None

    count = 0
+
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

-         # YOLOv8 Object Detection
-         results = model(frame)
-         detected_boxes = []
-
-         for result in results:
-             for box in result.boxes:
-                 class_id = int(box.cls[0].item())
-                 conf = box.conf[0].item()
-
-                 if class_id != HUMAN_CLASS_ID and conf > 0.5:  # Ignore humans, detect other objects
-                     x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding box coordinates
-                     detected_boxes.append((x1, y1, x2, y2))
-
+         # Apply Background Subtraction (Motion Detection)
        fg_mask = backSub.apply(frame)
        retval, mask_thresh = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
+
+         # Erosion to remove noise
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        mask_eroded = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)

+         # Find Contours (Potential Moving Objects)
        contours, _ = cv2.findContours(mask_eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-         min_contour_area = contour_thresh
-         large_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > min_contour_area]
+         large_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > contour_thresh]

        frame_out = frame.copy()

-         # Draw bounding boxes only on non-human moving objects
-         for cnt in large_contours:
-             x, y, w, h = cv2.boundingRect(cnt)
-             for (x1, y1, x2, y2) in detected_boxes:
-                 if x > x1 and y > y1 and (x + w) < x2 and (y + h) < y2:  # Ensure it's inside an object
-                     frame_out = cv2.rectangle(frame_out, (x, y), (x + w, y + h), (0, 0, 200), 3)
+         # Run YOLOv8 Object Detection
+         results = model(frame, verbose=False)  # Perform object detection
+
+         for result in results:
+             boxes = result.boxes.xyxy  # Bounding boxes
+             class_ids = result.boxes.cls  # Class IDs
+             confs = result.boxes.conf  # Confidence scores
+
+             for box, class_id, conf in zip(boxes, class_ids, confs):
+                 if conf < 0.5:  # Skip low-confidence detections
+                     continue
+
+                 class_name = model.names[int(class_id)]  # Get class name
+
+                 if class_name in ["box", "suitcase", "handbag", "backpack"]:  # Consider as a package
+                     x1, y1, x2, y2 = map(int, box)
+
+                     # Check if detected package overlaps with a moving contour
+                     for cnt in large_contours:
+                         cx, cy, cw, ch = cv2.boundingRect(cnt)
+                         if x1 < cx < x2 and y1 < cy < y2:
+                             # Draw bounding box for detected package
+                             cv2.rectangle(frame_out, (x1, y1), (x2, y2), (0, 255, 0), 3)
+                             cv2.putText(frame_out, f"{class_name} ({conf:.2f})", (x1, y1 - 10),
+                                         cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        frame_out_final = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
        out.write(frame_out)

+         # Display every 12th frame
        if not count % 12:
            yield frame_out_final, None
        count += 1
@@ -82,9 +94,9 @@ def vid_inf(vid_path, contour_thresh):
    cv2.destroyAllWindows()
    yield None, output_video

- # Gradio interface
+ # Gradio Interface
input_video = gr.Video(label="Input Video")
- contour_thresh = gr.Slider(0, 10000, value=4, label="Contour Threshold", info="Adjust the threshold based on package size.")
+ contour_thresh = gr.Slider(0, 10000, value=500, label="Contour Threshold")
output_frames = gr.Image(label="Output Frames")
output_video_file = gr.Video(label="Output Video")

@@ -92,11 +104,13 @@ app = gr.Interface(
    fn=vid_inf,
    inputs=[input_video, contour_thresh],
    outputs=[output_frames, output_video_file],
-     title="Package Tracking using YOLOv8 & Motion Detection",
-     description="A smart video analysis tool that uses YOLOv8 to track packages while ignoring human movement.",
+     title="Package Tracking using Motion Detection & Object Detection",
+     description="Detects and tracks moving packages while ignoring humans using YOLOv8 and OpenCV.",
    allow_flagging="never",
    examples=[["./sample/car.mp4", "1000"], ["./sample/motion_test.mp4", "5000"], ["./sample/home.mp4", "4500"]],
    cache_examples=False,
)
+
app.queue().launch()

+
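
Note on the new overlap test: the condition "x1 < cx < x2 and y1 < cy < y2" only checks the top-left corner of the moving contour's bounding rectangle, so a contour that straddles the left or top edge of a package box never matches. A minimal sketch of a full rectangle-intersection check that could be swapped in; the helper name rects_overlap is illustrative and not part of this commit:

def rects_overlap(x1, y1, x2, y2, cx, cy, cw, ch):
    # Convert the contour rectangle from (x, y, w, h) to corner form.
    cx2, cy2 = cx + cw, cy + ch
    # Axis-aligned rectangles intersect unless one lies entirely
    # to one side of the other.
    return not (cx2 < x1 or cx > x2 or cy2 < y1 or cy > y2)

The inner loop condition would then read "if rects_overlap(x1, y1, x2, y2, cx, cy, cw, ch):". Relatedly, the examples pass the threshold as strings ("1000", "5000"); if those values reach vid_inf unconverted, adding "contour_thresh = float(contour_thresh)" at the top of the function would keep the "cv2.contourArea(cnt) > contour_thresh" comparison from raising a TypeError.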