Candle committed on
Commit ·
1b7edb8
1
Parent(s): 8b689fc
k-1 cut logic
Browse files- detect_scene.py +63 -17
detect_scene.py
CHANGED
|
@@ -5,16 +5,20 @@ from transnetv2_pytorch import TransNetV2
|
|
| 5 |
import json
|
| 6 |
import re
|
| 7 |
|
| 8 |
-
SCENE_CUT_THRESHOLD = 0.09
|
|
|
|
|
|
|
|
|
|
| 9 |
data_dir = Path("data/animations")
|
| 10 |
-
files = sorted(data_dir.glob("sample
|
|
|
|
| 11 |
|
| 12 |
def get_best_device():
|
| 13 |
if torch.cuda.is_available():
|
| 14 |
return torch.device("cuda")
|
| 15 |
elif torch.backends.mps.is_available():
|
| 16 |
-
|
| 17 |
-
return torch.device("cpu")
|
| 18 |
else:
|
| 19 |
return torch.device("cpu")
|
| 20 |
|
|
@@ -145,18 +149,42 @@ def detect_scene_changes(frames):
|
|
| 145 |
video_tensor = frames_to_video_tensor(frames)
|
| 146 |
video_tensor = video_tensor.unsqueeze(0).to(device) # shape: 1 x num_frames x H x W x 3
|
| 147 |
with torch.no_grad():
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
|
|
|
| 153 |
return {
|
| 154 |
"single_frame_pred": single_frame_pred,
|
| 155 |
"all_frame_pred": all_frame_pred_np,
|
| 156 |
-
"
|
| 157 |
-
"
|
| 158 |
}
|
| 159 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
|
| 161 |
if __name__ == "__main__":
|
| 162 |
device = get_best_device()
|
|
@@ -165,26 +193,44 @@ if __name__ == "__main__":
|
|
| 165 |
state_dict = torch.load("transnetv2-pytorch-weights.pth")
|
| 166 |
model.load_state_dict(state_dict)
|
| 167 |
model.eval().to(device)
|
|
|
|
| 168 |
for file in files:
|
| 169 |
match = re.search(r"sample-(\d+)", file.name)
|
| 170 |
sample_num = match.group(1) if match else "unknown"
|
| 171 |
original_frames = load_original_frames(file)
|
| 172 |
-
result =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
# Save results to JSON (include threshold and predictions)
|
| 174 |
json_filename = file.parent / f"sample-{sample_num}.json"
|
| 175 |
with open(json_filename, "w") as f:
|
| 176 |
json.dump({
|
| 177 |
-
"
|
| 178 |
-
"
|
| 179 |
-
"
|
| 180 |
-
"
|
| 181 |
}, f, indent=2)
|
| 182 |
# Save timeline JPG
|
| 183 |
timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
|
| 184 |
plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
|
| 185 |
save_timeline_jpg(
|
| 186 |
frames=original_frames,
|
| 187 |
-
scene_change_indices=
|
| 188 |
filename=timeline_filename,
|
| 189 |
interval=10,
|
| 190 |
roi_radius=2,
|
|
|
|
| 5 |
import json
import re

# SCENE_CUT_THRESHOLD = 0.09  # former threshold-based cut detection, replaced by top-K selection below
K = 3  # Number of cuts to detect (the main loop selects at most K-1 cut indices — TODO confirm intent)
MIN_DURATION_FRAMES = 2  # minimum frame distance enforced between any two selected cuts

# Input animations: one webp per sample, named sample-<num>.webp
data_dir = Path("data/animations")
files = sorted(data_dir.glob("sample-*.webp"))
print(f"Found {len(files)} files to process.")
|
| 15 |
|
| 16 |
def get_best_device():
    """Return the best available torch device.

    Preference order: CUDA GPU, then Apple Metal (MPS), then CPU fallback.

    Returns:
        torch.device: the selected device.
    """
    if torch.cuda.is_available():
        return torch.device("cuda")
    elif torch.backends.mps.is_available():
        return torch.device("mps")
    else:
        return torch.device("cpu")
|
| 24 |
|
|
|
|
| 149 |
video_tensor = frames_to_video_tensor(frames)
|
| 150 |
video_tensor = video_tensor.unsqueeze(0).to(device) # shape: 1 x num_frames x H x W x 3
|
| 151 |
with torch.no_grad():
|
| 152 |
+
single_frame_logits, all_frame_logits = model(video_tensor)
|
| 153 |
+
# Squeeze last dimension so output is flat (num_frames,)
|
| 154 |
+
single_frame_logits_np = single_frame_logits.cpu().numpy().squeeze() # shape: (num_frames,)
|
| 155 |
+
all_frame_logits_np = all_frame_logits["many_hot"].cpu().numpy().squeeze() # shape: (num_frames,)
|
| 156 |
+
single_frame_pred = torch.sigmoid(single_frame_logits).cpu().numpy().squeeze() # shape: (num_frames,)
|
| 157 |
+
all_frame_pred_np = torch.sigmoid(all_frame_logits["many_hot"]).cpu().numpy().squeeze() # shape: (num_frames,)
|
| 158 |
return {
|
| 159 |
"single_frame_pred": single_frame_pred,
|
| 160 |
"all_frame_pred": all_frame_pred_np,
|
| 161 |
+
"single_frame_logits": single_frame_logits_np,
|
| 162 |
+
"all_frame_logits": all_frame_logits_np,
|
| 163 |
}
|
| 164 |
|
| 165 |
+
def cached_detect_scene_changes(file, original_frames):
    """Detect scene changes with on-disk caching to avoid redundant model runs.

    Results are cached as JSON next to the input file
    (``sample-<num>.transnetv2.json``). Arrays are stored as JSON lists and
    converted back to numpy arrays on load. A corrupt or schema-incomplete
    cache file is ignored and recomputed instead of crashing.

    Args:
        file: Path to the sample animation; only ``.name`` and ``.parent``
            are used to locate the cache file.
        original_frames: Frames passed through to ``detect_scene_changes``
            on a cache miss.

    Returns:
        dict with numpy arrays under keys ``single_frame_pred``,
        ``all_frame_pred``, ``single_frame_logits``, ``all_frame_logits``.
    """
    keys = ("single_frame_pred", "all_frame_pred",
            "single_frame_logits", "all_frame_logits")
    match = re.search(r"sample-(\d+)", file.name)
    sample_num = match.group(1) if match else "unknown"
    transnetv2_json = file.parent / f"sample-{sample_num}.transnetv2.json"

    if transnetv2_json.exists():
        try:
            with open(transnetv2_json, "r") as f:
                cached = json.load(f)
            # Restore numpy arrays from the JSON lists.
            return {k: np.array(cached[k]) for k in keys}
        except (json.JSONDecodeError, KeyError):
            # Corrupt or incomplete cache: fall through and recompute.
            pass

    result = detect_scene_changes(original_frames)
    # Persist the model output so subsequent runs skip inference.
    with open(transnetv2_json, "w") as f:
        json.dump({k: result[k].tolist() for k in keys}, f, indent=2)
    return result
|
| 188 |
|
| 189 |
if __name__ == "__main__":
|
| 190 |
device = get_best_device()
|
|
|
|
| 193 |
state_dict = torch.load("transnetv2-pytorch-weights.pth")
|
| 194 |
model.load_state_dict(state_dict)
|
| 195 |
model.eval().to(device)
|
| 196 |
+
|
| 197 |
for file in files:
|
| 198 |
match = re.search(r"sample-(\d+)", file.name)
|
| 199 |
sample_num = match.group(1) if match else "unknown"
|
| 200 |
original_frames = load_original_frames(file)
|
| 201 |
+
result = cached_detect_scene_changes(file, original_frames)
|
| 202 |
+
# scene_change_indices = [i for i, p in enumerate(result["single_frame_pred"]) if p >= SCENE_CUT_THRESHOLD]
|
| 203 |
+
# Detect top-K-1 scene changes
|
| 204 |
+
single_frame_pred = result["single_frame_pred"]
|
| 205 |
+
|
| 206 |
+
# Ignore first and last frame when selecting scene changes, and enforce MIN_DURATION_FRAMES between cuts
|
| 207 |
+
valid_indices = np.arange(1, len(single_frame_pred) - 1)
|
| 208 |
+
valid_preds = single_frame_pred[1:-1]
|
| 209 |
+
# Sort indices by prediction score (descending)
|
| 210 |
+
sorted_indices = valid_indices[np.argsort(valid_preds)[::-1]]
|
| 211 |
+
scene_change_indices = []
|
| 212 |
+
for idx in sorted_indices:
|
| 213 |
+
if all(abs(idx - prev) >= MIN_DURATION_FRAMES for prev in scene_change_indices):
|
| 214 |
+
scene_change_indices.append(int(idx))
|
| 215 |
+
if len(scene_change_indices) >= (K - 1):
|
| 216 |
+
break
|
| 217 |
+
|
| 218 |
+
print(f"File: {file.name}, Frames: {len(original_frames)}, Scene Changes: {len(scene_change_indices)}")
|
| 219 |
# Save results to JSON (include threshold and predictions)
|
| 220 |
json_filename = file.parent / f"sample-{sample_num}.json"
|
| 221 |
with open(json_filename, "w") as f:
|
| 222 |
json.dump({
|
| 223 |
+
"num_frames": len(original_frames),
|
| 224 |
+
"scene_change_indices": scene_change_indices,
|
| 225 |
+
# "threshold": SCENE_CUT_THRESHOLD
|
| 226 |
+
"k": K,
|
| 227 |
}, f, indent=2)
|
| 228 |
# Save timeline JPG
|
| 229 |
timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
|
| 230 |
plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
|
| 231 |
save_timeline_jpg(
|
| 232 |
frames=original_frames,
|
| 233 |
+
scene_change_indices=scene_change_indices,
|
| 234 |
filename=timeline_filename,
|
| 235 |
interval=10,
|
| 236 |
roi_radius=2,
|