# sprite-dx-data / detect_scene.py
import torch
import numpy as np
from pathlib import Path
from transnetv2_pytorch import TransNetV2
import json
import re
# SCENE_CUT_THRESHOLD = 0.09
K = 3  # Number of scenes to detect; the script keeps the top K - 1 cut candidates
MIN_DURATION_FRAMES = 2  # Minimum spacing (in frames) between selected cuts
MIN_CONFIDENCE = 0.02  # Cuts scoring below this mark the file as failed
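# Instead of thresholding single-frame predictions (the commented-out
# SCENE_CUT_THRESHOLD approach above), the selection loop below keeps the
# K - 1 highest-scoring frames, enforces MIN_DURATION_FRAMES between cuts,
# and flags the file as failed if any kept cut scores below MIN_CONFIDENCE.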
data_dir = Path("data/animations")
files = sorted(data_dir.glob("sample-*.webp"))
print(f"Found {len(files)} files to process.")
def get_best_device():
    """Pick the best available torch device: CUDA, then MPS, then CPU."""
    if torch.cuda.is_available():
        return torch.device("cuda")
    elif torch.backends.mps.is_available():
        # Return torch.device("cpu") here instead to force CPU if MPS misbehaves.
        return torch.device("mps")
    else:
        return torch.device("cpu")
def load_original_frames(filepath):
    """Load original frames from an animated webp file as PIL Images."""
    from PIL import Image
    im = Image.open(filepath)
    frames = []
    try:
        while True:
            # convert("RGB") returns a copy, so seeking past this frame
            # does not mutate the images already collected.
            frames.append(im.convert("RGB"))
            im.seek(im.tell() + 1)
    except EOFError:
        pass
    return frames
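# A roughly equivalent sketch using Pillow's standard ImageSequence helper:
#
#   from PIL import ImageSequence
#   frames = [f.convert("RGB") for f in ImageSequence.Iterator(im)]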
def save_prediction_plot(single_frame_pred, original_frames, filename, interval=5, title=None):
    """
    Save a plot of single frame predictions with thumbnails annotated at regular intervals.
    """
    import matplotlib.pyplot as plt
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    plt.figure(figsize=(12, 4))
    if title:
        plt.title(title)
    plt.plot(single_frame_pred)
    ax = plt.gca()
    # Add thumbnails at regular intervals
    for idx in range(0, len(original_frames), interval):
        thumb = original_frames[idx].resize((64, 64))
        imagebox = OffsetImage(np.array(thumb), zoom=0.5)
        ab = AnnotationBbox(imagebox, (idx, single_frame_pred[idx]), frameon=False, box_alignment=(0.5, -0.1))
        ax.add_artist(ab)
    plt.xlabel("Frame")
    plt.ylabel("Prediction")
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
def save_timeline_jpg(frames, scene_change_indices, filename, interval=5, roi_radius=2, title=None, single_frame_pred=None):
    """
    Save a timeline JPG with thumbnails every `interval` frames and every frame near scene changes.
    Scene change regions are highlighted. Each thumbnail is annotated with its frame index.
    """
    import matplotlib.pyplot as plt
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    import matplotlib.patches as mpatches
    # Determine frames to render
    frames_to_render = set(range(0, len(frames), interval))
    for idx in scene_change_indices:
        for offset in range(-roi_radius, roi_radius + 1):
            fidx = idx + offset
            if 0 <= fidx < len(frames):
                frames_to_render.add(fidx)
    frames_to_render = sorted(frames_to_render)
    # Map frames to evenly spaced positions
    n = len(frames_to_render)
    x_positions = list(range(n))
    fig, ax = plt.subplots(figsize=(max(8, n * 0.5), 3))
    ax.set_xlim(-1, n)
    ax.set_ylim(0, 1)
    ax.axis('off')
    # Highlight scene change regions
    for idx in scene_change_indices:
        region = [i for i, fidx in enumerate(frames_to_render) if abs(fidx - idx) <= roi_radius]
        if region:
            start, end = region[0], region[-1]
            rect = mpatches.Rectangle((start - 0.5, 0.05), end - start + 1, 0.9, color='yellow', alpha=0.2)
            ax.add_patch(rect)
    # Prepare sets for quick lookup: a cut index marks the last frame of a
    # scene (red border below); the following frame starts the next scene (green).
    last_frames = set(scene_change_indices)
    first_frames = set(idx + 1 for idx in scene_change_indices if idx + 1 < len(frames))
    # Draw thumbnails and annotate
    for i, fidx in enumerate(frames_to_render):
        thumb = frames[fidx].resize((32, 32))
        imagebox = OffsetImage(np.array(thumb), zoom=0.7)
        # Determine border color
        if fidx in last_frames:
            bboxprops = dict(edgecolor='red', linewidth=2)
        elif fidx in first_frames:
            bboxprops = dict(edgecolor='green', linewidth=2)
        else:
            bboxprops = None
        ab = AnnotationBbox(
            imagebox,
            (x_positions[i], 0.6),
            frameon=True,
            box_alignment=(0.5, 0.5),
            bboxprops=bboxprops
        )
        ax.add_artist(ab)
        # Draw frame index
        ax.text(x_positions[i], 0.32, str(fidx), ha='center', va='center', fontsize=9,
                color='black', bbox=dict(facecolor='white', edgecolor='none', alpha=0.8, boxstyle='round,pad=0.2'))
        # Draw prediction value below frame index
        if single_frame_pred is not None:
            pred_val = single_frame_pred[fidx]
            # Ensure pred_val is a scalar float for formatting
            if isinstance(pred_val, np.ndarray):
                pred_val = float(pred_val.squeeze())
            ax.text(x_positions[i], 0.18, f"{pred_val:.2f}", ha='center', va='center', fontsize=8,
                    color='blue', bbox=dict(facecolor='white', edgecolor='none', alpha=0.7, boxstyle='round,pad=0.2'))
    if title:
        ax.text(0, 0.95, title, fontsize=12, ha='left', va='top', color='navy')
    plt.tight_layout()
    plt.savefig(filename, dpi=150)
    plt.close(fig)
def frames_to_video_tensor(frames):
    """Convert a list of PIL frames to a torch tensor of shape (num_frames, 27, 48, 3) and dtype uint8."""
    from PIL import Image
    processed = []
    for frame in frames:
        # PIL's resize takes (width, height), so (48, 27) yields a 27x48x3 array.
        arr = np.array(frame.resize((48, 27), resample=Image.Resampling.BILINEAR), dtype=np.uint8)
        processed.append(torch.from_numpy(arr))
    return torch.stack(processed)
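# Note: 48x27 RGB uint8 is the input resolution TransNetV2 expects, so frames
# are downscaled here rather than inside the model.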
def detect_scene_changes(frames):
    """Run TransNetV2 over the frames; uses the module-level `model` and `device` set up in __main__."""
    video_tensor = frames_to_video_tensor(frames)
    video_tensor = video_tensor.unsqueeze(0).to(device)  # shape: 1 x num_frames x H x W x 3
    with torch.no_grad():
        single_frame_logits, all_frame_logits = model(video_tensor)
    # Squeeze batch and trailing dimensions so each output is flat (num_frames,)
    single_frame_logits_np = single_frame_logits.cpu().numpy().squeeze()
    all_frame_logits_np = all_frame_logits["many_hot"].cpu().numpy().squeeze()
    single_frame_pred = torch.sigmoid(single_frame_logits).cpu().numpy().squeeze()
    all_frame_pred_np = torch.sigmoid(all_frame_logits["many_hot"]).cpu().numpy().squeeze()
    return {
        "single_frame_pred": single_frame_pred,
        "all_frame_pred": all_frame_pred_np,
        "single_frame_logits": single_frame_logits_np,
        "all_frame_logits": all_frame_logits_np,
    }
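# Both raw logits and sigmoid probabilities are returned (and cached below),
# so selection parameters like MIN_CONFIDENCE can be retuned later without
# re-running the model.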
def cached_detect_scene_changes(file, original_frames):
    """Detect scene changes with caching to avoid redundant computation."""
    match = re.search(r"sample-(\d+)", file.name)
    sample_num = match.group(1) if match else "unknown"
    transnetv2_json = file.parent / f"sample-{sample_num}.transnetv2.json"
    if transnetv2_json.exists():
        with open(transnetv2_json, "r") as f:
            result = json.load(f)
        result["single_frame_pred"] = np.array(result["single_frame_pred"])
        result["all_frame_pred"] = np.array(result["all_frame_pred"])
        result["single_frame_logits"] = np.array(result["single_frame_logits"])
        result["all_frame_logits"] = np.array(result["all_frame_logits"])
    else:
        result = detect_scene_changes(original_frames)
        # Save model output to cache file
        with open(transnetv2_json, "w") as f:
            json.dump({
                "single_frame_pred": result["single_frame_pred"].tolist(),
                "all_frame_pred": result["all_frame_pred"].tolist(),
                "single_frame_logits": result["single_frame_logits"].tolist(),
                "all_frame_logits": result["all_frame_logits"].tolist()
            }, f, indent=2)
    return result
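# The cache is keyed only on the sample number, so if an animation file is
# regenerated, delete its .transnetv2.json to force a fresh model pass.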
if __name__ == "__main__":
    device = get_best_device()
    print(f"Using device: {device}")
    model = TransNetV2()
    # map_location keeps the load working even when the weights were saved
    # from a different device (e.g. CUDA weights loaded on a CPU/MPS machine).
    state_dict = torch.load("transnetv2-pytorch-weights.pth", map_location="cpu")
    model.load_state_dict(state_dict)
    model.eval().to(device)
    for file in files:
        match = re.search(r"sample-(\d+)", file.name)
        sample_num = match.group(1) if match else "unknown"
        original_frames = load_original_frames(file)
        result = cached_detect_scene_changes(file, original_frames)
        # scene_change_indices = [i for i, p in enumerate(result["single_frame_pred"]) if p >= SCENE_CUT_THRESHOLD]
        # Detect the top K - 1 scene changes
        single_frame_pred = result["single_frame_pred"]
        # Ignore first and last frame when selecting scene changes, and enforce MIN_DURATION_FRAMES between cuts
        valid_indices = np.arange(1, len(single_frame_pred) - 1)
        valid_preds = single_frame_pred[1:-1]
        # Sort indices by prediction score (descending)
        sorted_indices = valid_indices[np.argsort(valid_preds)[::-1]]
        scene_change_indices = []
        scene_cut_confidences = []
        for idx in sorted_indices:
            if all(abs(idx - prev) >= MIN_DURATION_FRAMES for prev in scene_change_indices):
                scene_change_indices.append(int(idx))
                scene_cut_confidences.append(float(single_frame_pred[idx]))
                if len(scene_change_indices) >= (K - 1):
                    break
        # Check if any confidence is below MIN_CONFIDENCE
        failed = any(conf < MIN_CONFIDENCE for conf in scene_cut_confidences)
        print(f"File: {file.name}, Frames: {len(original_frames)}, Scene Changes: {len(scene_change_indices)}, Success: {not failed}")
        # Save results to JSON (include selection parameters and success flag)
        json_filename = file.parent / f"sample-{sample_num}.json"
        with open(json_filename, "w") as f:
            json.dump({
                "num_frames": len(original_frames),
                "scene_change_indices": scene_change_indices,
                "scene_cut_confidences": scene_cut_confidences,
                # "threshold": SCENE_CUT_THRESHOLD
                "params": {
                    "k": K,
                    "min_duration_frames": MIN_DURATION_FRAMES,
                    "min_confidence": MIN_CONFIDENCE,
                },
                "success": not failed
            }, f, indent=2)
        # Save timeline JPG
        timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
        plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
        save_timeline_jpg(
            frames=original_frames,
            scene_change_indices=scene_change_indices,
            filename=timeline_filename,
            interval=10,
            roi_radius=2,
            title=f"Timeline: {file.name}",
            single_frame_pred=result["single_frame_pred"]
        )
        # Save prediction plot with thumbnails
        save_prediction_plot(
            single_frame_pred=result["single_frame_pred"],
            original_frames=original_frames,
            filename=plot_filename,
            interval=5,
            title=f"Single Frame Predictions: {file.name}"
        )
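# Per-sample outputs written next to each input (illustrative <num> naming):
#   sample-<num>.transnetv2.json  - cached raw model outputs
#   sample-<num>.json             - selected cuts, confidences, params, success
#   sample-<num>.timeline.jpg     - thumbnail timeline with cut highlights
#   sample-<num>.plot.jpg         - prediction curve with thumbnails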