viswanani committed on
Commit
936b6dc
·
verified ·
1 Parent(s): 1658638

Update drs_engine.py

Browse files
Files changed (1) hide show
  1. drs_engine.py +46 -30
drs_engine.py CHANGED
@@ -3,32 +3,43 @@ import os
3
  import tempfile
4
  import math
5
  from ultralytics import YOLO
 
 
6
 
7
- model = YOLO("best.pt") # fallback to yolov8n.pt if needed
8
 
9
- # CONFIGS
10
- PITCH_LENGTH_METERS = 20.12 # meters
11
- FRAME_WIDTH = 1280 # assume standard
12
- FPS = 30 # assume 30fps if not extracted
13
 
14
- # Distance conversion: use visual calibration if you want real scale
15
  def estimate_speed(p1, p2, fps):
16
  pixel_distance = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
17
- meters_per_pixel = 0.03 # approximate scale
18
  distance_m = pixel_distance * meters_per_pixel
19
  speed_mps = distance_m * fps
20
- return speed_mps * 3.6 # to km/h
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def process_video(video_path):
23
  cap = cv2.VideoCapture(video_path)
24
  if not cap.isOpened():
25
  raise Exception("Video open failed")
26
 
27
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
28
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
29
- fps = cap.get(cv2.CAP_PROP_FPS) or FPS
30
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
31
-
32
  temp_out = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
33
  out = cv2.VideoWriter(temp_out, fourcc, fps, (width, height))
34
 
@@ -36,9 +47,9 @@ def process_video(video_path):
36
  bounce_frame = None
37
  verdict = "NOT OUT"
38
  bounce_detected = False
39
-
40
  prev_center = None
41
- for f_idx in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
 
42
  ret, frame = cap.read()
43
  if not ret:
44
  break
@@ -47,43 +58,47 @@ def process_video(video_path):
47
  ball_box = None
48
  for box in results[0].boxes:
49
  cls = int(box.cls[0])
50
- if cls == 0: # cricket ball class
51
  x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)
52
  cx, cy = int((x1 + x2)/2), int((y1 + y2)/2)
53
  ball_box = (cx, cy)
54
- trajectory.append((f_idx, cx, cy))
55
  cv2.circle(frame, (cx, cy), 8, (0, 0, 255), -1)
56
-
57
- # Simple bounce logic: sudden increase in Y
58
  if prev_center and not bounce_detected:
59
  dy = cy - prev_center[1]
60
  if dy > 15:
61
- bounce_frame = f_idx
62
  bounce_detected = True
63
  cv2.putText(frame, "Bounce!", (cx, cy - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,255), 2)
64
  prev_center = (cx, cy)
65
- break # take one ball only
66
 
67
- # Draw trajectory line
68
  for i in range(1, len(trajectory)):
69
- _, x1, y1 = trajectory[i-1]
70
- _, x2, y2 = trajectory[i]
71
  cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
72
 
73
- # Add speed (estimate from last 2)
74
  if len(trajectory) >= 2:
75
- _, x1, y1 = trajectory[-2]
76
- _, x2, y2 = trajectory[-1]
77
  speed_kmh = estimate_speed((x1, y1), (x2, y2), fps)
78
  cv2.putText(frame, f"Speed: {int(speed_kmh)} km/h", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
79
 
80
- # OUT/NOT OUT logic (basic): Assume bounce before batsman, and impact near stumps
81
  if bounce_detected and trajectory:
82
- last_x, last_y = trajectory[-1][1:]
83
- if 550 < last_x < 750: # center line assumed
84
  verdict = "OUT"
85
 
86
- # Add verdict text
 
 
 
 
 
 
 
 
 
 
87
  cv2.putText(frame, f"Decision: {verdict}", (30, height - 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
88
  (0,255,0) if verdict == "NOT OUT" else (0,0,255), 3)
89
 
@@ -91,4 +106,5 @@ def process_video(video_path):
91
 
92
  cap.release()
93
  out.release()
94
- return temp_out
 
 
3
  import tempfile
4
  import math
5
  from ultralytics import YOLO
6
+ from pydub import AudioSegment
7
+ import ffmpeg
8
 
9
+ model = YOLO("best.pt")
10
 
11
+ PITCH_LENGTH_METERS = 20.12
12
+ FPS = 30
 
 
13
 
 
14
def estimate_speed(p1, p2, fps, meters_per_pixel=0.03):
    """Estimate ball speed in km/h from two consecutive frame positions.

    Args:
        p1: (x, y) pixel coordinates of the ball in the earlier frame.
        p2: (x, y) pixel coordinates of the ball in the later frame.
        fps: video frame rate; the two points are assumed to be exactly
            one frame apart, so metres-per-frame * fps gives metres/second.
        meters_per_pixel: pixel-to-metre calibration scale. Defaults to
            0.03, the approximate value previously hard-coded in the body;
            pass a measured value for real-world accuracy.

    Returns:
        Estimated speed in km/h (float).
    """
    pixel_distance = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
    distance_m = pixel_distance * meters_per_pixel
    speed_mps = distance_m * fps  # one frame elapsed between p1 and p2
    return speed_mps * 3.6  # m/s -> km/h
20
+
21
def add_voice_to_video(video_path, verdict):
    """Mux a verdict voice-over audio track onto the annotated video.

    Args:
        video_path: path to the silent annotated .mp4 produced by
            process_video().
        verdict: "OUT" or anything else; selects which pre-recorded clip
            ("out.mp3" / "not_out.mp3", expected in the working directory)
            is used as the soundtrack.

    Returns:
        Path to a new temporary .mp4 containing video + AAC audio.
    """
    audio_file = "out.mp3" if verdict == "OUT" else "not_out.mp3"
    temp_audio = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False).name
    # Re-export through pydub to normalise the mp3 before muxing.
    AudioSegment.from_file(audio_file).export(temp_audio, format="mp3")

    final_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    # BUG FIX: the audio must be a second input stream. The previous code
    # passed audio=temp_audio as an output option, which serialises to an
    # invalid "-audio" ffmpeg flag and never attaches the track. It also
    # combined vf='scale=...' with vcodec='copy' — a filter cannot be
    # applied while stream-copying, so ffmpeg would error out; the filter
    # is dropped and the video stream is copied untouched.
    video_in = ffmpeg.input(video_path)
    audio_in = ffmpeg.input(temp_audio)
    (
        ffmpeg
        .output(
            video_in.video,
            audio_in.audio,
            final_output,
            vcodec='copy',
            acodec='aac',
            strict='experimental',
        )
        .run(overwrite_output=True)
    )
    return final_output
33
 
34
def process_video(video_path):
    """Run ball tracking on a delivery video and return an annotated clip.

    Detects the ball per frame with the module-level YOLO model, draws the
    trajectory, a speed estimate, bounce marker, pitch-line zones and an
    OUT/NOT OUT verdict, then muxes a verdict voice-over via
    add_voice_to_video().

    NOTE(review): this listing comes from a diff; a few context lines are
    not visible here (see the inline notes below) — confirm against the
    full file before relying on this reconstruction.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise Exception("Video open failed")

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Fall back to the module constant when the container reports 0/None fps.
    fps = cap.get(cv2.CAP_PROP_FPS) or FPS
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    temp_out = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    out = cv2.VideoWriter(temp_out, fourcc, fps, (width, height))

    # NOTE(review): the line initialising `trajectory` (presumably
    # `trajectory = []`) is a diff context line not shown here — verify.
    bounce_frame = None          # kept for API/debugging; never reassigned in the visible code
    verdict = "NOT OUT"
    bounce_detected = False
    prev_center = None           # last ball centre, used for bounce detection

    for _ in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
        ret, frame = cap.read()
        if not ret:
            break

        # NOTE(review): the line producing `results` (presumably
        # `results = model(frame)`) is a diff context line not shown here.
        ball_box = None
        for box in results[0].boxes:
            cls = int(box.cls[0])
            if cls == 0:  # class 0 is treated as the ball
                x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)
                cx, cy = int((x1 + x2)/2), int((y1 + y2)/2)
                ball_box = (cx, cy)
                trajectory.append((cx, cy))
                cv2.circle(frame, (cx, cy), 8, (0, 0, 255), -1)

                # Bounce heuristic: first sudden downward jump (>15 px) in y.
                if prev_center and not bounce_detected:
                    dy = cy - prev_center[1]
                    if dy > 15:
                        bounce_detected = True
                        cv2.putText(frame, "Bounce!", (cx, cy - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,255), 2)
                prev_center = (cx, cy)
                break  # track only the first matching detection per frame

        # Draw the accumulated trajectory as a polyline.
        for i in range(1, len(trajectory)):
            x1, y1 = trajectory[i-1]
            x2, y2 = trajectory[i]
            cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

        # Speed estimate from the last two tracked positions.
        if len(trajectory) >= 2:
            x1, y1 = trajectory[-2]
            x2, y2 = trajectory[-1]
            speed_kmh = estimate_speed((x1, y1), (x2, y2), fps)
            cv2.putText(frame, f"Speed: {int(speed_kmh)} km/h", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)

        # Basic verdict: after a bounce, a last x-position inside the
        # assumed stump corridor (550..750 px) is called OUT.
        if bounce_detected and trajectory:
            last_x, last_y = trajectory[-1]
            if 550 < last_x < 750:
                verdict = "OUT"

        # Draw pitch zones (Leg / Middle / Off thirds of the frame width,
        # between 55% and 95% of the frame height).
        zone_top = int(height * 0.55)
        zone_bottom = int(height * 0.95)
        zone_width = width // 3
        cv2.rectangle(frame, (0, zone_top), (zone_width, zone_bottom), (255, 200, 200), 1)
        cv2.putText(frame, "Leg", (zone_width//4, zone_top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 200, 200), 1)
        cv2.rectangle(frame, (zone_width, zone_top), (zone_width*2, zone_bottom), (200, 255, 200), 1)
        cv2.putText(frame, "Middle", (zone_width + 10, zone_top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 255, 200), 1)
        cv2.rectangle(frame, (zone_width*2, zone_top), (width, zone_bottom), (200, 200, 255), 1)
        cv2.putText(frame, "Off", (zone_width*2 + 10, zone_top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 255), 1)

        # Verdict overlay: green for NOT OUT, red for OUT.
        cv2.putText(frame, f"Decision: {verdict}", (30, height - 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
            (0,255,0) if verdict == "NOT OUT" else (0,0,255), 3)

        # NOTE(review): the frame-write call (presumably `out.write(frame)`)
        # is a diff context line not shown here — verify.

    cap.release()
    out.release()

    return add_voice_to_video(temp_out, verdict)