Rohitsharma15 committed
Commit 9f67c7f · verified · 1 Parent(s): a1e9d53

Update app.py
Files changed (1): app.py (+59 -28)
app.py CHANGED

@@ -7,19 +7,17 @@ import gradio as gr
 import numpy as np
 from ultralytics import YOLO
 
-# Load the trained YOLO model
-model = YOLO('best.pt')  # Replace with your trained model file
+# Load the YOLOv8 model
+model = YOLO("yolov8n.pt")  # Using pre-trained YOLOv8 nano model
 
-def detect_packages(frame):
-    results = model(frame)
-    for result in results:
-        for box in result.boxes:
-            x1, y1, x2, y2 = map(int, box.xyxy[0])
-            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
-    return frame
+# Object classes in YOLOv8
+CLASS_NAMES = model.names
+HUMAN_CLASS_ID = 0  # Class ID for "person" in YOLO
 
 def vid_inf(vid_path, contour_thresh):
     cap = cv2.VideoCapture(vid_path)
+
+    # Get the video frames' width and height
     frame_width = int(cap.get(3))
     frame_height = int(cap.get(4))
     fps = int(cap.get(cv2.CAP_PROP_FPS))
@@ -27,33 +25,66 @@ def vid_inf(vid_path, contour_thresh):
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     output_video = "output_recorded.mp4"
     out = cv2.VideoWriter(output_video, fourcc, fps, frame_size)
-
+
+    backSub = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=True)
+
     if not cap.isOpened():
         print("Error opening video file")
-        return None, None
-
+        return
+
     count = 0
     while cap.isOpened():
         ret, frame = cap.read()
-        if ret:
-            frame_out = detect_packages(frame.copy())
-            frame_out_final = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
-            out.write(frame_out)
-
-            if not count % 12:
-                yield frame_out_final, None
-            count += 1
-        else:
+        if not ret:
             break
-
+
+        # YOLOv8 Object Detection
+        results = model(frame)
+        detected_boxes = []
+
+        for result in results:
+            for box in result.boxes:
+                class_id = int(box.cls[0].item())
+                conf = box.conf[0].item()
+
+                if class_id != HUMAN_CLASS_ID and conf > 0.5:  # Ignore humans, detect other objects
+                    x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding box coordinates
+                    detected_boxes.append((x1, y1, x2, y2))
+
+        fg_mask = backSub.apply(frame)
+        retval, mask_thresh = cv2.threshold(fg_mask, 200, 255, cv2.THRESH_BINARY)
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+        mask_eroded = cv2.morphologyEx(mask_thresh, cv2.MORPH_OPEN, kernel)
+
+        contours, _ = cv2.findContours(mask_eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+        min_contour_area = contour_thresh
+        large_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > min_contour_area]
+
+        frame_out = frame.copy()
+
+        # Draw bounding boxes only on non-human moving objects
+        for cnt in large_contours:
+            x, y, w, h = cv2.boundingRect(cnt)
+            for (x1, y1, x2, y2) in detected_boxes:
+                if x > x1 and y > y1 and (x + w) < x2 and (y + h) < y2:  # Ensure it's inside an object
+                    frame_out = cv2.rectangle(frame_out, (x, y), (x + w, y + h), (0, 0, 200), 3)
+
+        frame_out_final = cv2.cvtColor(frame_out, cv2.COLOR_BGR2RGB)
+        out.write(frame_out)
+
+        if not count % 12:
+            yield frame_out_final, None
+        count += 1
+
     cap.release()
     out.release()
     cv2.destroyAllWindows()
     yield None, output_video
 
-# Gradio Interface
+# Gradio interface
 input_video = gr.Video(label="Input Video")
-contour_thresh = gr.Slider(0, 10000, value=4, label="Contour Threshold", info="Adjust the Contour Threshold according to the object size that you want to detect.")
+contour_thresh = gr.Slider(0, 10000, value=4, label="Contour Threshold", info="Adjust the threshold based on package size.")
 output_frames = gr.Image(label="Output Frames")
 output_video_file = gr.Video(label="Output Video")
 
@@ -61,11 +92,11 @@ app = gr.Interface(
     fn=vid_inf,
     inputs=[input_video, contour_thresh],
     outputs=[output_frames, output_video_file],
-    title="Motion Detection using YOLOv8",
-    description="A Gradio app for dynamic video analysis using YOLOv8 to track labeled packages while ignoring other moving objects.",
+    title="Package Tracking using YOLOv8 & Motion Detection",
+    description="A smart video analysis tool that uses YOLOv8 to track packages while ignoring human movement.",
     allow_flagging="never",
-    examples=[["./sample/package_test.mp4", "1000"]],
+    examples=[["./sample/car.mp4", "1000"], ["./sample/motion_test.mp4", "5000"], ["./sample/home.mp4", "4500"]],
     cache_examples=False,
 )
-
 app.queue().launch()
+
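A note on the drawing logic added in this commit: a motion contour is boxed only when its bounding rectangle lies strictly inside one of the YOLO detections kept after the person class is filtered out. A minimal standalone sketch of that containment test (the helper name and example values are illustrative, not part of the commit):

def contour_inside_detection(contour_box, detection_box):
    # True if a motion contour (x, y, w, h) sits strictly inside a detection box (x1, y1, x2, y2)
    x, y, w, h = contour_box
    x1, y1, x2, y2 = detection_box
    return x > x1 and y > y1 and (x + w) < x2 and (y + h) < y2

# A 40x40 contour at (120, 130) inside a detection spanning (100, 100)-(300, 300)
print(contour_inside_detection((120, 130, 40, 40), (100, 100, 300, 300)))  # True
# The same contour shifted left of the detection box fails the strict test
print(contour_inside_detection((60, 130, 40, 40), (100, 100, 300, 300)))   # False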
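The updated vid_inf also keeps the generator pattern the Gradio interface relies on: yielding (frame, None) every 12th frame refreshes the Image output while processing runs, and the final yield of (None, output_video) fills the Video output. A self-contained sketch of that streaming pattern, using hypothetical names and simple text outputs rather than this app's components:

import time
import gradio as gr

def count_up(steps):
    # Intermediate yields update the first output; the final yield fills the second.
    for i in range(int(steps)):
        yield f"step {i + 1} of {int(steps)}", None
        time.sleep(0.1)
    yield None, f"finished after {int(steps)} steps"

demo = gr.Interface(
    fn=count_up,
    inputs=gr.Number(value=5, label="Steps"),
    outputs=[gr.Textbox(label="Progress"), gr.Textbox(label="Result")],
)

demo.queue().launch()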