Candle committed on
Commit
29736b1
·
1 Parent(s): 2abeac8
data/animations/sample-000.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "scene_change_indices": [
3
+ 40
4
+ ]
5
+ }
data/animations/sample-000.timeline.jpg ADDED

Git LFS Details

  • SHA256: 6d655e8cf6e9901ef143fb27c73c82f0f61aa3eff4c1d88fdffc6db1611ad7d0
  • Pointer size: 130 Bytes
  • Size of remote file: 60.8 kB
detect_scene.py CHANGED
@@ -2,7 +2,13 @@ import torch
2
  import numpy as np
3
  from pathlib import Path
4
  from transnetv2_pytorch import TransNetV2
 
 
5
 
 
 
 
 
6
  def get_best_device():
7
  if torch.cuda.is_available():
8
  return torch.device("cuda")
@@ -12,39 +18,110 @@ def get_best_device():
12
  else:
13
  return torch.device("cpu")
14
 
15
- device = get_best_device()
16
- print(f"Using device: {device}")
17
- model = TransNetV2()
18
- state_dict = torch.load("transnetv2-pytorch-weights.pth")
19
- model.load_state_dict(state_dict)
20
- # model.eval().cuda()
21
- model.eval().to(device)
22
-
23
- # # Sample Code from the original repo
24
- # with torch.no_grad():
25
- # # shape: batch dim x video frames x frame height x frame width x RGB (not BGR) channels
26
- # input_video = torch.zeros(1, 100, 27, 48, 3, dtype=torch.uint8)
27
- # # single_frame_pred, all_frame_pred = model(input_video.cuda())
28
- # single_frame_pred, all_frame_pred = model(input_video)
29
-
30
- # single_frame_pred = torch.sigmoid(single_frame_pred).cpu().numpy()
31
- # all_frame_pred = torch.sigmoid(all_frame_pred["many_hot"]).cpu().numpy()
32
-
33
- # # plot results
34
- # import matplotlib.pyplot as plt
35
- # plt.figure(figsize=(12, 4))
36
- # plt.subplot(1, 2, 1)
37
- # plt.title("Single Frame Predictions")
38
- # plt.plot(single_frame_pred[0])
39
- # plt.subplot(1, 2, 2)
40
- # plt.title("All Frame Predictions")
41
- # plt.imshow(all_frame_pred[0].T, aspect="auto", cmap="gray")
42
- # plt.show()
43
- # # plt.savefig("test_output.png")
44
-
45
- # Load sample-*.webp files (each file is an animated webp files with ~120 frames)
46
- # from data/animations folder then run detection.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
  def load_webp_animation(filepath):
50
  from PIL import Image
@@ -70,7 +147,7 @@ def detect_scene_changes(filepath):
70
  single_frame_pred = torch.sigmoid(single_frame_pred).cpu().numpy()[0]
71
  all_frame_pred_np = torch.sigmoid(all_frame_pred["many_hot"]).cpu().numpy()[0]
72
  # Get frame indices where scene changes occur (threshold at 0.5)
73
- scene_change_indices = [i for i, p in enumerate(single_frame_pred) if p >= 0.5]
74
  return {
75
  "single_frame_pred": single_frame_pred,
76
  "all_frame_pred": all_frame_pred_np,
@@ -78,49 +155,39 @@ def detect_scene_changes(filepath):
78
  "num_frames": video_tensor.shape[1]
79
  }
80
 
81
- data_dir = Path("data/animations")
82
-
83
- # Get all sample-*.webp files
84
- files = sorted(data_dir.glob("sample-000.webp"))
85
-
86
- import matplotlib.pyplot as plt
87
- import re
88
-
89
- for file in files:
90
- result = detect_scene_changes(file)
91
- # Extract sample number from filename
92
- match = re.search(r"sample-(\d+)", file.name)
93
- sample_num = match.group(1) if match else "unknown"
94
- plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
95
-
96
- # Load original frames for thumbnails
97
- from PIL import Image
98
- im = Image.open(file)
99
- original_frames = []
100
- try:
101
- while True:
102
- original_frames.append(im.convert("RGB"))
103
- im.seek(im.tell() + 1)
104
- except EOFError:
105
- pass
106
-
107
- import matplotlib.pyplot as plt
108
- from matplotlib.offsetbox import OffsetImage, AnnotationBbox
109
-
110
- plt.figure(figsize=(12, 4))
111
- plt.title(f"Single Frame Predictions: {file.name}")
112
- plt.plot(result["single_frame_pred"])
113
- ax = plt.gca()
114
- # Add thumbnails at regular intervals (e.g., every 20 frames)
115
- interval = 5
116
- for idx in range(0, len(original_frames), interval):
117
- thumb = original_frames[idx].resize((64, 64))
118
- imagebox = OffsetImage(np.array(thumb), zoom=0.5)
119
- ab = AnnotationBbox(imagebox, (idx, result["single_frame_pred"][idx]), frameon=False, box_alignment=(0.5, -0.1))
120
- ax.add_artist(ab)
121
- plt.xlabel("Frame")
122
- plt.ylabel("Prediction")
123
- plt.tight_layout()
124
- plt.savefig(plot_filename)
125
- plt.close()
126
 
 
2
  import numpy as np
3
  from pathlib import Path
4
  from transnetv2_pytorch import TransNetV2
5
+ import json
6
+ import re
7
 
8
# Sigmoid-probability cutoff above which a frame counts as a scene cut.
# NOTE(review): much lower than the 0.5 used by the original TransNetV2
# sample code — presumably tuned empirically for these animations; confirm.
SCENE_CUT_THRESHOLD = 0.09

# Directory holding the animated-webp samples; outputs are written beside them.
data_dir = Path("data/animations")
# Input files to process, in deterministic (sorted) order.
files = sorted(data_dir.glob("sample-000.webp"))
12
  def get_best_device():
13
  if torch.cuda.is_available():
14
  return torch.device("cuda")
 
18
  else:
19
  return torch.device("cpu")
20
 
21
def save_prediction_plot(single_frame_pred, original_frames, filename, interval=5, title=None):
    """
    Plot per-frame predictions and save the figure to *filename*.

    Thumbnails of the animation (resized to 64x64) are pinned onto the
    prediction curve every *interval* frames so the scores can be visually
    correlated with the video content.
    """
    import matplotlib.pyplot as plt
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    import numpy as np

    plt.figure(figsize=(12, 4))
    if title:
        plt.title(title)
    plt.plot(single_frame_pred)
    axes = plt.gca()
    # Pin a thumbnail onto the curve every `interval` frames.
    for frame_idx in range(0, len(original_frames), interval):
        thumbnail = original_frames[frame_idx].resize((64, 64))
        image_box = OffsetImage(np.array(thumbnail), zoom=0.5)
        annotation = AnnotationBbox(
            image_box,
            (frame_idx, single_frame_pred[frame_idx]),
            frameon=False,
            box_alignment=(0.5, -0.1),
        )
        axes.add_artist(annotation)
    plt.xlabel("Frame")
    plt.ylabel("Prediction")
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
45
+
46
def load_original_frames(filepath):
    """Decode every frame of an animated webp file into a list of RGB PIL Images."""
    from PIL import Image

    animation = Image.open(filepath)
    frames = []
    # PIL signals the end of a multi-frame image by raising EOFError on seek.
    try:
        while True:
            frames.append(animation.convert("RGB"))
            animation.seek(animation.tell() + 1)
    except EOFError:
        pass
    return frames
58
+
59
def save_timeline_jpg(frames, scene_change_indices, filename, interval=5, roi_radius=2, title=None):
    """
    Render a horizontal thumbnail timeline of *frames* and save it as a JPG.

    A thumbnail is drawn every *interval* frames, plus every frame within
    *roi_radius* of each detected scene change.  Scene-change windows get a
    translucent yellow background; a frame at a cut index is outlined in red
    and the frame immediately after it in green.  Each thumbnail is labelled
    with its frame index.
    """
    import matplotlib.pyplot as plt
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    import matplotlib.patches as mpatches
    import numpy as np

    total = len(frames)

    # Select which frames appear on the timeline: a regular sampling plus a
    # dense window around every detected cut.
    selected = set(range(0, total, interval))
    for cut in scene_change_indices:
        selected.update(
            f for f in range(cut - roi_radius, cut + roi_radius + 1)
            if 0 <= f < total
        )
    ordered = sorted(selected)

    # Thumbnails are laid out at evenly spaced integer slots, not at their
    # raw frame indices, so dense scene-change windows don't overlap.
    count = len(ordered)
    slots = list(range(count))

    fig, ax = plt.subplots(figsize=(max(8, count * 0.5), 3))
    ax.set_xlim(-1, count)
    ax.set_ylim(0, 1)
    ax.axis('off')

    # Shade the timeline slots that fall inside a scene-change window.
    for cut in scene_change_indices:
        window = [i for i, f in enumerate(ordered) if abs(f - cut) <= roi_radius]
        if window:
            lo, hi = window[0], window[-1]
            shade = mpatches.Rectangle(
                (lo - 0.5, 0.05), hi - lo + 1, 0.9, color='yellow', alpha=0.2
            )
            ax.add_patch(shade)

    # Frame-index sets used to pick a border colour for each thumbnail.
    scene_last = set(scene_change_indices)
    scene_first = set(cut + 1 for cut in scene_change_indices if cut + 1 < total)

    # Draw each selected frame as a framed thumbnail plus its index label.
    for slot, frame_idx in zip(slots, ordered):
        thumbnail = frames[frame_idx].resize((32, 32))
        image_box = OffsetImage(np.array(thumbnail), zoom=0.7)
        if frame_idx in scene_last:
            border = dict(edgecolor='red', linewidth=2, boxstyle='round,pad=0.2')
        elif frame_idx in scene_first:
            border = dict(edgecolor='green', linewidth=2, boxstyle='round,pad=0.2')
        else:
            border = None
        annotation = AnnotationBbox(
            image_box,
            (slot, 0.6),
            frameon=True,
            box_alignment=(0.5, 0.5),
            bboxprops=border,
        )
        ax.add_artist(annotation)
        ax.text(
            slot, 0.25, str(frame_idx),
            ha='center', va='center', fontsize=9, color='black',
            bbox=dict(facecolor='white', edgecolor='none', alpha=0.8,
                      boxstyle='round,pad=0.2'),
        )

    if title:
        ax.text(0, 0.95, title, fontsize=12, ha='left', va='top', color='navy')
    plt.tight_layout()
    plt.savefig(filename, dpi=150)
    plt.close(fig)
125
 
126
  def load_webp_animation(filepath):
127
  from PIL import Image
 
147
  single_frame_pred = torch.sigmoid(single_frame_pred).cpu().numpy()[0]
148
  all_frame_pred_np = torch.sigmoid(all_frame_pred["many_hot"]).cpu().numpy()[0]
149
  # Get frame indices where scene changes occur (threshold at SCENE_CUT_THRESHOLD)
150
+ scene_change_indices = [i for i, p in enumerate(single_frame_pred) if p >= SCENE_CUT_THRESHOLD]
151
  return {
152
  "single_frame_pred": single_frame_pred,
153
  "all_frame_pred": all_frame_pred_np,
 
155
  "num_frames": video_tensor.shape[1]
156
  }
157
 
158
if __name__ == "__main__":
    device = get_best_device()
    print(f"Using device: {device}")

    # Load TransNetV2 with its pretrained weights and move it to the chosen
    # device.  `model` stays module-global so detect_scene_changes can use it.
    model = TransNetV2()
    state_dict = torch.load("transnetv2-pytorch-weights.pth")
    model.load_state_dict(state_dict)
    model.eval().to(device)

    for file in files:
        # Derive the sample number (e.g. "000") from the filename.
        match = re.search(r"sample-(\d+)", file.name)
        sample_num = match.group(1) if match else "unknown"

        result = detect_scene_changes(file)

        # Persist the detected cut indices as JSON next to the input file.
        json_filename = file.parent / f"sample-{sample_num}.json"
        with open(json_filename, "w") as f:
            json.dump({"scene_change_indices": result["scene_change_indices"]}, f, indent=2)

        # Render the two visualisations: a thumbnail timeline and the
        # per-frame prediction curve with inline thumbnails.
        timeline_filename = file.parent / f"sample-{sample_num}.timeline.jpg"
        plot_filename = file.parent / f"sample-{sample_num}.plot.jpg"
        original_frames = load_original_frames(file)
        save_timeline_jpg(
            frames=original_frames,
            scene_change_indices=result["scene_change_indices"],
            filename=timeline_filename,
            interval=5,
            roi_radius=2,
            title=f"Timeline: {file.name}",
        )
        save_prediction_plot(
            single_frame_pred=result["single_frame_pred"],
            original_frames=original_frames,
            filename=plot_filename,
            interval=5,
            title=f"Single Frame Predictions: {file.name}",
        )
 
 
 
 
 
 
 
 
 
 
193