Abhinav-hf committed on
Commit
b23c835
·
verified ·
1 Parent(s): 4730fe6

Update Backend/BrandRecognition/Dynamic/Brand_Count_Vid.py

Browse files
Backend/BrandRecognition/Dynamic/Brand_Count_Vid.py CHANGED
@@ -3,6 +3,118 @@ import numpy as np
3
  from collections import deque, defaultdict
4
  from ultralytics import YOLO
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
 
8
  def iou(box1, box2):
@@ -25,8 +137,8 @@ def smooth_box(box_history):
25
 
26
  def process_video(input_path, output_path):
27
  model = YOLO('Weights/kitkat_s.pt')
28
- cap = cv2.VideoCapture(input_path)
29
 
 
30
  width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
31
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
32
  fps = int(cap.get(cv2.CAP_PROP_FPS))
@@ -126,10 +238,21 @@ def process_video(input_path, output_path):
126
 
127
  def annotate_video(input_video):
128
  output_path = 'annotated_output.mp4'
129
- confirmed_items = process_video(input_video, output_path)
 
 
 
 
 
 
 
 
 
 
130
 
131
  item_list = [(brand, quantity) for brand, quantity in confirmed_items.items()]
132
 
133
  status_message = "Video processed successfully!"
134
 
135
- return output_path, item_list, status_message
 
 
3
  from collections import deque, defaultdict
4
  from ultralytics import YOLO
5
 
6
def comprehensive_frame_reduction(
    input_path,
    output_path,
    target_frame_count=100,
    scene_change_weight=0.5,
    motion_weight=0.3,
    color_weight=0.2
):
    """
    Comprehensively reduce video frames while maintaining video content representation.

    Every frame is scored by a weighted mix of scene-change magnitude, motion
    magnitude, and color-distribution variance; the highest-scoring frame from
    each of ``target_frame_count`` segments is written to the output video.

    Args:
        input_path (str): Path to input video.
        output_path (str): Path to output video.
        target_frame_count (int): Desired number of frames in reduced video
            (clamped to the actual frame count).
        scene_change_weight (float): Weight for scene change importance.
        motion_weight (float): Weight for motion importance.
        color_weight (float): Weight for color distribution importance.

    Returns:
        list[int]: Indices (into the original video) of the selected frames.

    Raises:
        IOError: If the input video cannot be opened or yields no frames.
    """
    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        raise IOError(f"Cannot open video: {input_path}")

    # Video properties
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Read all frames up front (NOTE: holds the whole video in memory —
    # acceptable for short clips, revisit for long inputs).
    frames = []
    frame_grays = []
    frame_hists = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
        frame_grays.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        # 8x8x8-bin BGR histogram summarizes the frame's color distribution.
        hist = cv2.calcHist([frame], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
        frame_hists.append(hist.flatten())
    cap.release()

    if not frames:
        raise IOError(f"Video contains no readable frames: {input_path}")

    # Never request more output frames than exist — prevents zero-width
    # segments (and the empty-slice argmax crash) below.
    target_frame_count = min(target_frame_count, len(frames))

    pixel_count = float(width * height)

    # Scene change: mean absolute difference between consecutive gray frames.
    scene_changes = np.zeros(len(frames))
    for i in range(1, len(frames)):
        diff = cv2.absdiff(frame_grays[i - 1], frame_grays[i])
        scene_changes[i] = np.sum(diff) / pixel_count

    # Motion: absolute difference across a two-frame gap (coarser signal).
    motion_scores = np.zeros(len(frames))
    for i in range(2, len(frames)):
        diff = cv2.absdiff(frame_grays[i - 2], frame_grays[i])
        motion_scores[i] = np.sum(diff) / pixel_count

    # Color variance: distance of each histogram from the video-wide mean.
    # Mean histogram is hoisted out of the loop (was recomputed per frame).
    mean_hist = np.mean(frame_hists, axis=0)
    color_variance = np.array([np.linalg.norm(h - mean_hist) for h in frame_hists])

    def _min_max_normalize(scores):
        # Map a constant signal to zeros instead of dividing by zero.
        span = scores.max() - scores.min()
        if span == 0:
            return np.zeros_like(scores)
        return (scores - scores.min()) / span

    importance_scores = (
        scene_change_weight * _min_max_normalize(scene_changes)
        + motion_weight * _min_max_normalize(motion_scores)
        + color_weight * _min_max_normalize(color_variance)
    )

    # Output video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # Pick the most important frame from each of target_frame_count evenly
    # sized segments so the selection stays distributed across the video;
    # the last segment absorbs any remainder frames.
    segment_size = len(frames) // target_frame_count
    selected_frames = []
    for segment in range(target_frame_count):
        start = segment * segment_size
        end = (segment + 1) * segment_size if segment < target_frame_count - 1 else len(frames)

        best_frame_idx = int(np.argmax(importance_scores[start:end])) + start
        selected_frames.append(best_frame_idx)
        out.write(frames[best_frame_idx])

    out.release()

    print(f"Reduced from {total_frames} to {len(selected_frames)} frames")
    return selected_frames
118
 
119
 
120
  def iou(box1, box2):
 
137
 
138
  def process_video(input_path, output_path):
139
  model = YOLO('Weights/kitkat_s.pt')
 
140
 
141
+ cap = cv2.VideoCapture(input_path)
142
  width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
143
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
144
  fps = int(cap.get(cv2.CAP_PROP_FPS))
 
238
 
239
  def annotate_video(input_video):
240
  output_path = 'annotated_output.mp4'
241
+
242
+ comprehensive_frame_reduction(
243
+ input_video,
244
+ 'reduced_video.mp4',
245
+ target_frame_count=100,
246
+ scene_change_weight=0.5,
247
+ motion_weight=0.3,
248
+ color_weight=0.2
249
+ )
250
+
251
+ confirmed_items = process_video('reduced_video.mp4', output_path)
252
 
253
  item_list = [(brand, quantity) for brand, quantity in confirmed_items.items()]
254
 
255
  status_message = "Video processed successfully!"
256
 
257
+ return output_path, item_list, status_message
258
+