Spaces:
Sleeping
Sleeping
Update video_processor.py
Browse files- video_processor.py +32 -4
video_processor.py
CHANGED
|
@@ -1,27 +1,55 @@
|
|
| 1 |
import cv2
|
| 2 |
-
import
|
| 3 |
-
import tempfile
|
| 4 |
from detector import LBWDetector
|
| 5 |
-
from utils import draw_boxes
|
| 6 |
|
| 7 |
def process_video(video_path, output_path="output.mp4"):
|
| 8 |
detector = LBWDetector()
|
| 9 |
cap = cv2.VideoCapture(video_path)
|
| 10 |
|
| 11 |
-
width
|
| 12 |
height = int(cap.get(4))
|
| 13 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 14 |
|
| 15 |
out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
while cap.isOpened():
|
| 18 |
ret, frame = cap.read()
|
| 19 |
if not ret:
|
| 20 |
break
|
|
|
|
| 21 |
detections, class_names = detector.detect_objects(frame)
|
|
|
|
|
|
|
|
|
|
| 22 |
frame = draw_boxes(frame, detections, class_names)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
out.write(frame)
|
| 24 |
|
| 25 |
cap.release()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
out.release()
|
| 27 |
return output_path
|
|
|
|
| 1 |
import cv2
|
| 2 |
+
import numpy as np
|
|
|
|
| 3 |
from detector import LBWDetector
|
| 4 |
+
from utils import draw_boxes, overlay_decision_text
|
| 5 |
|
| 6 |
def process_video(video_path, output_path="output.mp4"):
    """Run LBW detection over a video and write an annotated copy.

    Every frame is passed through the detector and drawn with bounding
    boxes.  The frame where both a pad and the ball are visible is taken
    as the impact frame (the ball's box centre is the impact point), and
    a frame showing both stumps and ball marks a potential hit.  A final
    decision screen is appended for ~2 seconds.

    Args:
        video_path: Path to the input video readable by OpenCV.
        output_path: Where the annotated mp4 is written (default "output.mp4").

    Returns:
        The output path, so callers can chain on it.

    Raises:
        ValueError: If the input video cannot be opened.
    """
    detector = LBWDetector()
    cap = cv2.VideoCapture(video_path)
    # Fail fast on a bad path/codec instead of silently writing an
    # empty 0x0 video.
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    # Use named properties rather than the magic indices 3 / 4.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Some containers report 0 (or NaN) fps; that would break the writer
    # and make the 2-second decision screen zero frames long.
    if not fps or fps <= 0:
        fps = 25.0

    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    impact_frame = None
    impact_point = None
    hit_stumps = False

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        detections, class_names = detector.detect_objects(frame)
        # Set for O(1) membership tests below (each detection row is
        # assumed to end with the class id — confirmed by the unpacking
        # loop further down).
        labels = {class_names[int(cls_id)] for *_, cls_id in detections}

        # Draw overlays
        frame = draw_boxes(frame, detections, class_names)

        # Detect impact frame: pad and ball visible together.
        if 'pad' in labels and 'ball' in labels:
            impact_frame = frame.copy()
            # Assume impact point is the ball's box centre in this frame.
            for x1, y1, x2, y2, _conf, cls_id in detections:
                if class_names[int(cls_id)] == 'ball':
                    impact_point = ((x1 + x2) / 2, (y1 + y2) / 2)
                    break

        # Check if the ball is ever detected alongside the stumps.
        # NOTE(review): this fires on co-occurrence in any frame, not
        # strictly "later than impact" — confirm that is intended.
        if 'stumps' in labels and 'ball' in labels:
            hit_stumps = True

        out.write(frame)

    cap.release()

    # Append decision screen frame on a black background.
    decision_frame = np.zeros((height, width, 3), dtype=np.uint8)
    decision_frame = overlay_decision_text(decision_frame, impact_point, hit_stumps, impact_frame is not None)
    for _ in range(int(fps * 2)):  # show for 2 seconds
        out.write(decision_frame)

    out.release()
    return output_path