Candle committed on
Commit
07ee382
·
1 Parent(s): 29736b1
Files changed (1) hide show
  1. detect_scene.py +35 -22
detect_scene.py CHANGED
@@ -97,6 +97,14 @@ def save_timeline_jpg(frames, scene_change_indices, filename, interval=5, roi_ra
97
  first_frames = set(idx + 1 for idx in scene_change_indices if idx + 1 < len(frames))
98
 
99
  # Draw thumbnails and annotate
 
 
 
 
 
 
 
 
100
  for i, fidx in enumerate(frames_to_render):
101
  thumb = frames[fidx].resize((32, 32))
102
  imagebox = OffsetImage(np.array(thumb), zoom=0.7)
@@ -115,7 +123,12 @@ def save_timeline_jpg(frames, scene_change_indices, filename, interval=5, roi_ra
115
  bboxprops=bboxprops
116
  )
117
  ax.add_artist(ab)
118
- ax.text(x_positions[i], 0.25, str(fidx), ha='center', va='center', fontsize=9, color='black', bbox=dict(facecolor='white', edgecolor='none', alpha=0.8, boxstyle='round,pad=0.2'))
 
 
 
 
 
119
 
120
  if title:
121
  ax.text(0, 0.95, title, fontsize=12, ha='left', va='top', color='navy')
@@ -123,24 +136,18 @@ def save_timeline_jpg(frames, scene_change_indices, filename, interval=5, roi_ra
123
  plt.savefig(filename, dpi=150)
124
  plt.close(fig)
125
 
126
- def load_webp_animation(filepath):
127
- from PIL import Image
128
  import numpy as np
129
- im = Image.open(filepath)
130
- frames = []
131
- try:
132
- while True:
133
- frame = im.convert("RGB").resize((48, 27), resample=Image.Resampling.BILINEAR) # resize to 48x27 (W x H)
134
- arr = np.array(frame, dtype=np.uint8)
135
- frames.append(torch.from_numpy(arr))
136
- im.seek(im.tell() + 1)
137
- except EOFError:
138
- pass
139
- video_tensor = torch.stack(frames) # shape: num_frames x 27 x 48 x 3
140
- return video_tensor
141
-
142
- def detect_scene_changes(filepath):
143
- video_tensor = load_webp_animation(filepath)
144
  video_tensor = video_tensor.unsqueeze(0).to(device) # shape: 1 x num_frames x H x W x 3
145
  with torch.no_grad():
146
  single_frame_pred, all_frame_pred = model(video_tensor)
@@ -155,6 +162,7 @@ def detect_scene_changes(filepath):
155
  "num_frames": video_tensor.shape[1]
156
  }
157
 
 
158
  if __name__ == "__main__":
159
  device = get_best_device()
160
  print(f"Using device: {device}")
@@ -165,15 +173,20 @@ if __name__ == "__main__":
165
  for file in files:
166
  match = re.search(r"sample-(\d+)", file.name)
167
  sample_num = match.group(1) if match else "unknown"
168
- result = detect_scene_changes(file)
169
- # Save results to JSON
 
170
  json_filename = file.parent / f"sample-{sample_num}.json"
171
  with open(json_filename, "w") as f:
172
- json.dump({"scene_change_indices": result["scene_change_indices"]}, f, indent=2)
 
 
 
 
 
173
  # Save timeline JPG
174
  timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
175
  plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
176
- original_frames = load_original_frames(file)
177
  save_timeline_jpg(
178
  frames=original_frames,
179
  scene_change_indices=result["scene_change_indices"],
 
97
  first_frames = set(idx + 1 for idx in scene_change_indices if idx + 1 < len(frames))
98
 
99
  # Draw thumbnails and annotate
100
+ # Optionally, get single_frame_pred if passed as a kwarg
101
+ single_frame_pred = None
102
+ import inspect
103
+ if 'single_frame_pred' in inspect.signature(save_timeline_jpg).parameters:
104
+ single_frame_pred = locals().get('single_frame_pred', None)
105
+
106
+ # But better: pass single_frame_pred as an argument (see below for main loop update)
107
+
108
  for i, fidx in enumerate(frames_to_render):
109
  thumb = frames[fidx].resize((32, 32))
110
  imagebox = OffsetImage(np.array(thumb), zoom=0.7)
 
123
  bboxprops=bboxprops
124
  )
125
  ax.add_artist(ab)
126
+ # Draw frame index
127
+ ax.text(x_positions[i], 0.32, str(fidx), ha='center', va='center', fontsize=9, color='black', bbox=dict(facecolor='white', edgecolor='none', alpha=0.8, boxstyle='round,pad=0.2'))
128
+ # Draw prediction value below frame index
129
+ if 'single_frame_pred' in locals() and single_frame_pred is not None:
130
+ pred_val = single_frame_pred[fidx]
131
+ ax.text(x_positions[i], 0.18, f"{pred_val:.2f}", ha='center', va='center', fontsize=8, color='blue', bbox=dict(facecolor='white', edgecolor='none', alpha=0.7, boxstyle='round,pad=0.2'))
132
 
133
  if title:
134
  ax.text(0, 0.95, title, fontsize=12, ha='left', va='top', color='navy')
 
136
  plt.savefig(filename, dpi=150)
137
  plt.close(fig)
138
 
139
def frames_to_video_tensor(frames):
    """Convert a sequence of PIL frames into a uint8 torch tensor.

    Each frame is downscaled to 48x27 (W x H) with bilinear resampling,
    so the result has shape (num_frames, 27, 48, 3) and dtype uint8.
    """
    import numpy as np
    from PIL import Image

    # Hoist the resampling filter lookup out of the loop.
    bilinear = Image.Resampling.BILINEAR
    resized = [
        torch.from_numpy(np.array(frame.resize((48, 27), resample=bilinear), dtype=np.uint8))
        for frame in frames
    ]
    return torch.stack(resized)
148
+
149
+ def detect_scene_changes(frames):
150
+ video_tensor = frames_to_video_tensor(frames)
 
 
 
 
 
 
151
  video_tensor = video_tensor.unsqueeze(0).to(device) # shape: 1 x num_frames x H x W x 3
152
  with torch.no_grad():
153
  single_frame_pred, all_frame_pred = model(video_tensor)
 
162
  "num_frames": video_tensor.shape[1]
163
  }
164
 
165
+
166
  if __name__ == "__main__":
167
  device = get_best_device()
168
  print(f"Using device: {device}")
 
173
  for file in files:
174
  match = re.search(r"sample-(\d+)", file.name)
175
  sample_num = match.group(1) if match else "unknown"
176
+ original_frames = load_original_frames(file)
177
+ result = detect_scene_changes(original_frames)
178
+ # Save results to JSON (include threshold and predictions)
179
  json_filename = file.parent / f"sample-{sample_num}.json"
180
  with open(json_filename, "w") as f:
181
+ json.dump({
182
+ "scene_change_indices": result["scene_change_indices"],
183
+ "threshold": SCENE_CUT_THRESHOLD,
184
+ "single_frame_pred": result["single_frame_pred"].tolist(),
185
+ "all_frame_pred": result["all_frame_pred"].tolist()
186
+ }, f, indent=2)
187
  # Save timeline JPG
188
  timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
189
  plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
 
190
  save_timeline_jpg(
191
  frames=original_frames,
192
  scene_change_indices=result["scene_change_indices"],