Candle committed on
Commit
efbb653
·
1 Parent(s): f3573cb

current impl

Browse files
.gitignore CHANGED
@@ -30,3 +30,8 @@ dist-ssr
30
  .vercel
31
  .env
32
  .env*.local
 
 
 
 
 
 
30
  .vercel
31
  .env
32
  .env*.local
33
+
34
+ *.pyc
35
+ __pycache__/
36
+ .pytest_cache/
37
+ .ipynb_checkpoints
README.md CHANGED
@@ -1,3 +1,16 @@
1
  # SpriteDX Dataset
2
 
3
  Starting this repo to store collected data from SpriteDX project.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # SpriteDX Dataset
2
 
3
  Starting this repo to store collected data from the SpriteDX project.
4
+
5
+ ## References
6
+
7
+ This project uses the TransNet V2 model.
8
+
9
+ @article{soucek2020transnetv2,
10
+ title={TransNet V2: An effective deep network architecture for fast shot transition detection},
11
+ author={Sou{\v{c}}ek, Tom{\'a}{\v{s}} and Loko{\v{c}}, Jakub},
12
+ year={2020},
13
+ journal={arXiv preprint arXiv:2008.04838},
14
+ }
15
+
16
+ Original implementation: https://github.com/soCzech/TransNetV2
data/animations/sample-000.plot.jpg ADDED

Git LFS Details

  • SHA256: 9a6503d28174e36d0df3e7bf6d375fd1bdf064de02e689d555a4b145829be936
  • Pointer size: 130 Bytes
  • Size of remote file: 28.1 kB
detect_scene.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ from pathlib import Path
4
+ from transnetv2_pytorch import TransNetV2
5
+
6
def get_best_device():
    """Select the compute device for inference.

    Prefers CUDA when available; otherwise falls back to CPU. Apple MPS is
    detected but deliberately not used (the MPS branch also returns CPU) —
    re-enable by returning ``torch.device("mps")`` in that branch.
    """
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        # MPS intentionally disabled for now; run on CPU instead.
        return torch.device("cpu")
    return torch.device("cpu")
14
+
15
# Select the device and load pretrained TransNet V2 weights once at import time.
device = get_best_device()
print(f"Using device: {device}")
model = TransNetV2()
state_dict = torch.load("transnetv2-pytorch-weights.pth")
model.load_state_dict(state_dict)
# model.eval().cuda()
model.eval().to(device)  # inference only — model stays in eval mode

# # Sample Code from the original repo
# with torch.no_grad():
#     # shape: batch dim x video frames x frame height x frame width x RGB (not BGR) channels
#     input_video = torch.zeros(1, 100, 27, 48, 3, dtype=torch.uint8)
#     # single_frame_pred, all_frame_pred = model(input_video.cuda())
#     single_frame_pred, all_frame_pred = model(input_video)

#     single_frame_pred = torch.sigmoid(single_frame_pred).cpu().numpy()
#     all_frame_pred = torch.sigmoid(all_frame_pred["many_hot"]).cpu().numpy()

#     # plot results
#     import matplotlib.pyplot as plt
#     plt.figure(figsize=(12, 4))
#     plt.subplot(1, 2, 1)
#     plt.title("Single Frame Predictions")
#     plt.plot(single_frame_pred[0])
#     plt.subplot(1, 2, 2)
#     plt.title("All Frame Predictions")
#     plt.imshow(all_frame_pred[0].T, aspect="auto", cmap="gray")
#     plt.show()
#     # plt.savefig("test_output.png")

# Load sample-*.webp files (each file is an animated webp with ~120 frames)
# from the data/animations folder, then run detection.
47
+
48
+
49
def load_webp_animation(filepath):
    """Load an animated WebP file as a uint8 video tensor.

    Every frame is converted to RGB and resized to 48x27 (W x H), the input
    resolution TransNet V2 expects.

    Args:
        filepath: path to an animated ``.webp`` file.

    Returns:
        torch.Tensor of shape [num_frames, 27, 48, 3], dtype uint8.

    Raises:
        ValueError: if no frames could be decoded from the file.
    """
    from PIL import Image  # local import: PIL is only needed for data loading

    im = Image.open(filepath)
    frames = []
    try:
        while True:
            # Resize to 48x27 (W x H), the model's expected input resolution.
            frame = im.convert("RGB").resize((48, 27), resample=Image.Resampling.BILINEAR)
            frames.append(torch.from_numpy(np.array(frame, dtype=np.uint8)))
            im.seek(im.tell() + 1)
    except EOFError:
        pass  # PIL signals "past the last frame" with EOFError
    if not frames:
        # torch.stack([]) raises an opaque RuntimeError; fail with a clear message.
        raise ValueError(f"No frames could be decoded from {filepath}")
    video_tensor = torch.stack(frames)  # shape: num_frames x 27 x 48 x 3
    return video_tensor
64
+
65
def detect_scene_changes(filepath):
    """Run TransNet V2 on one animated WebP and return per-frame predictions."""
    frames = load_webp_animation(filepath)
    batch = frames.unsqueeze(0).to(device)  # shape: 1 x num_frames x H x W x 3
    with torch.no_grad():
        single_logits, all_logits = model(batch)
    single_pred = torch.sigmoid(single_logits).cpu().numpy()[0]
    many_hot_pred = torch.sigmoid(all_logits["many_hot"]).cpu().numpy()[0]
    # A frame counts as a scene change when its probability reaches 0.5.
    change_frames = [idx for idx, prob in enumerate(single_pred) if prob >= 0.5]
    return {
        "single_frame_pred": single_pred,
        "all_frame_pred": many_hot_pred,
        "scene_change_indices": change_frames,
        "num_frames": batch.shape[1]
    }
80
+
81
data_dir = Path("data/animations")

# NOTE: currently only the first sample is processed; widen the glob
# (e.g. "sample-*.webp") to run over the whole dataset.
files = sorted(data_dir.glob("sample-000.webp"))

import matplotlib.pyplot as plt
import re

for sample_path in files:
    predictions = detect_scene_changes(sample_path)
    # Derive the sample number from the filename for the plot's output name.
    number_match = re.search(r"sample-(\d+)", sample_path.name)
    sample_id = number_match.group(1) if number_match else "unknown"
    out_path = sample_path.parent / f"sample-{sample_id}.plot.jpg"
    # Plot the single-frame predictions and save next to the input file.
    plt.figure(figsize=(12, 4))
    plt.title(f"Single Frame Predictions: {sample_path.name}")
    plt.plot(predictions["single_frame_pred"])
    plt.xlabel("Frame")
    plt.ylabel("Prediction")
    plt.tight_layout()
    plt.savefig(out_path)
    plt.close()
104
+
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ pillow
2
+ torch
3
+ numpy
4
+ matplotlib
transnetv2-pytorch-weights.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a03191f1d886181b2d51508475761e15fd5c865ebc44494db443058cc051c918
3
+ size 30509621
transnetv2_pytorch.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Citation: https://github.com/soCzech/TransNetV2/tree/master/inference-pytorch
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as functional
5
+
6
+ import random
7
+
8
+
9
class TransNetV2(nn.Module):
    """TransNet V2 shot-transition detection network (PyTorch port).

    Input: uint8 tensor of shape [batch, frames, 27, 48, 3] (RGB).
    Output: per-frame transition logits of shape [batch, frames, 1]; when
    ``use_many_hot_targets`` is enabled, also a dict with "many_hot" logits.
    Apply sigmoid to obtain probabilities.
    """

    def __init__(self,
                 F=16, L=3, S=2, D=1024,
                 use_many_hot_targets=True,
                 use_frame_similarity=True,
                 use_color_histograms=True,
                 use_mean_pooling=False,
                 dropout_rate=0.5,
                 use_convex_comb_reg=False,  # not supported
                 use_resnet_features=False,  # not supported
                 use_resnet_like_top=False,  # not supported
                 frame_similarity_on_last_layer=False):  # not supported
        super(TransNetV2, self).__init__()

        if use_resnet_features or use_resnet_like_top or use_convex_comb_reg or frame_similarity_on_last_layer:
            # FIX: was `raise NotImplemented(...)` — NotImplemented is a constant,
            # not an exception class, so raising it produced a confusing TypeError.
            raise NotImplementedError("Some options not implemented in Pytorch version of Transnet!")

        # Backbone: L stages of stacked dilated 3D CNN blocks, channels doubling per stage.
        self.SDDCNN = nn.ModuleList(
            [StackedDDCNNV2(in_filters=3, n_blocks=S, filters=F, stochastic_depth_drop_prob=0.)] +
            [StackedDDCNNV2(in_filters=(F * 2 ** (i - 1)) * 4, n_blocks=S, filters=F * 2 ** i) for i in range(1, L)]
        )

        self.frame_sim_layer = FrameSimilarity(
            sum([(F * 2 ** i) * 4 for i in range(L)]), lookup_window=101, output_dim=128, similarity_dim=128, use_bias=True
        ) if use_frame_similarity else None
        self.color_hist_layer = ColorHistograms(
            lookup_window=101, output_dim=128
        ) if use_color_histograms else None

        self.dropout = nn.Dropout(dropout_rate) if dropout_rate is not None else None

        output_dim = ((F * 2 ** (L - 1)) * 4) * 3 * 6  # 3x6 for spatial dimensions
        if use_frame_similarity: output_dim += 128
        if use_color_histograms: output_dim += 128

        self.fc1 = nn.Linear(output_dim, D)
        self.cls_layer1 = nn.Linear(D, 1)
        self.cls_layer2 = nn.Linear(D, 1) if use_many_hot_targets else None

        self.use_mean_pooling = use_mean_pooling
        self.eval()  # this port is inference-only; start in eval mode

    def forward(self, inputs):
        assert isinstance(inputs, torch.Tensor) and list(inputs.shape[2:]) == [27, 48, 3] and inputs.dtype == torch.uint8, \
            "incorrect input type and/or shape"
        # uint8 of shape [B, T, H, W, 3] to float of shape [B, 3, T, H, W]
        x = inputs.permute([0, 4, 1, 2, 3]).float()
        x = x.div_(255.)

        # Run the backbone, keeping each stage's output for the frame-similarity head.
        block_features = []
        for block in self.SDDCNN:
            x = block(x)
            block_features.append(x)

        if self.use_mean_pooling:
            x = torch.mean(x, dim=[3, 4])
            x = x.permute(0, 2, 1)
        else:
            # Flatten spatial dims and channels into per-frame feature vectors.
            x = x.permute(0, 2, 3, 4, 1)
            x = x.reshape(x.shape[0], x.shape[1], -1)

        if self.frame_sim_layer is not None:
            x = torch.cat([self.frame_sim_layer(block_features), x], 2)

        if self.color_hist_layer is not None:
            x = torch.cat([self.color_hist_layer(inputs), x], 2)

        x = self.fc1(x)
        x = functional.relu(x)

        if self.dropout is not None:
            x = self.dropout(x)

        one_hot = self.cls_layer1(x)

        if self.cls_layer2 is not None:
            return one_hot, {"many_hot": self.cls_layer2(x)}

        return one_hot
89
+
90
+
91
class StackedDDCNNV2(nn.Module):
    """Stack of DilatedDCNNV2 blocks with a residual connection and 2x spatial pooling."""

    def __init__(self,
                 in_filters,
                 n_blocks,
                 filters,
                 shortcut=True,
                 use_octave_conv=False,  # not supported
                 pool_type="avg",
                 stochastic_depth_drop_prob=0.0):
        super(StackedDDCNNV2, self).__init__()

        if use_octave_conv:
            # FIX: was `raise NotImplemented(...)` — NotImplemented is not an
            # exception class; raising it produced a TypeError.
            raise NotImplementedError("Octave convolution not implemented in Pytorch version of Transnet!")

        assert pool_type == "max" or pool_type == "avg"
        if use_octave_conv and pool_type == "max":
            print("WARN: Octave convolution was designed with average pooling, not max pooling.")

        self.shortcut = shortcut
        # First block takes in_filters channels; later blocks take the 4-way
        # concatenated output (filters * 4). The last block has no activation.
        self.DDCNN = nn.ModuleList([
            DilatedDCNNV2(in_filters if i == 1 else filters * 4, filters, octave_conv=use_octave_conv,
                          activation=functional.relu if i != n_blocks else None) for i in range(1, n_blocks + 1)
        ])
        self.pool = nn.MaxPool3d(kernel_size=(1, 2, 2)) if pool_type == "max" else nn.AvgPool3d(kernel_size=(1, 2, 2))
        self.stochastic_depth_drop_prob = stochastic_depth_drop_prob

    def forward(self, inputs):
        x = inputs
        shortcut = None

        for block in self.DDCNN:
            x = block(x)
            if shortcut is None:
                # Residual source: output of the first block in the stack.
                shortcut = x

        x = functional.relu(x)

        # FIX: was `if self.shortcut is not None:` — self.shortcut is a bool,
        # so the test was always True and shortcut=False was silently ignored.
        if self.shortcut:
            if self.stochastic_depth_drop_prob != 0.:
                if self.training:
                    # Stochastic depth: randomly drop the block's contribution.
                    if random.random() < self.stochastic_depth_drop_prob:
                        x = shortcut
                    else:
                        x = x + shortcut
                else:
                    # At eval time, scale by the expected keep probability.
                    x = (1 - self.stochastic_depth_drop_prob) * x + shortcut
            else:
                x += shortcut

        x = self.pool(x)
        return x
143
+
144
+
145
class DilatedDCNNV2(nn.Module):
    """Four parallel 3D convolutions with temporal dilations 1/2/4/8, concatenated on channels."""

    def __init__(self,
                 in_filters,
                 filters,
                 batch_norm=True,
                 activation=None,
                 octave_conv=False):  # not supported
        super(DilatedDCNNV2, self).__init__()

        if octave_conv:
            # FIX: was `raise NotImplemented(...)` — NotImplemented is not an
            # exception class; raising it produced a TypeError.
            raise NotImplementedError("Octave convolution not implemented in Pytorch version of Transnet!")

        assert not (octave_conv and batch_norm)

        # Conv bias is redundant when followed by batch norm.
        self.Conv3D_1 = Conv3DConfigurable(in_filters, filters, 1, use_bias=not batch_norm)
        self.Conv3D_2 = Conv3DConfigurable(in_filters, filters, 2, use_bias=not batch_norm)
        self.Conv3D_4 = Conv3DConfigurable(in_filters, filters, 4, use_bias=not batch_norm)
        self.Conv3D_8 = Conv3DConfigurable(in_filters, filters, 8, use_bias=not batch_norm)

        self.bn = nn.BatchNorm3d(filters * 4, eps=1e-3) if batch_norm else None
        self.activation = activation

    def forward(self, inputs):
        conv1 = self.Conv3D_1(inputs)
        conv2 = self.Conv3D_2(inputs)
        conv3 = self.Conv3D_4(inputs)
        conv4 = self.Conv3D_8(inputs)

        # Channel-wise concat of the four dilation branches: filters * 4 channels out.
        x = torch.cat([conv1, conv2, conv3, conv4], dim=1)

        if self.bn is not None:
            x = self.bn(x)

        if self.activation is not None:
            x = self.activation(x)

        return x
183
+
184
+
185
class Conv3DConfigurable(nn.Module):
    """3D convolution, optionally factored as a (2+1)D spatial + temporal pair.

    Padding is chosen so the output keeps the input's T/H/W dimensions;
    only the channel count changes (in_filters -> filters).
    """

    def __init__(self,
                 in_filters,
                 filters,
                 dilation_rate,
                 separable=True,
                 octave=False,  # not supported
                 use_bias=True,
                 kernel_initializer=None):  # not supported
        super(Conv3DConfigurable, self).__init__()

        # FIX: both raises used `NotImplemented(...)` — NotImplemented is a
        # constant, not an exception class; raising it produced a TypeError.
        if octave:
            raise NotImplementedError("Octave convolution not implemented in Pytorch version of Transnet!")
        if kernel_initializer is not None:
            raise NotImplementedError("Kernel initializers are not implemented in Pytorch version of Transnet!")

        assert not (separable and octave)

        if separable:
            # (2+1)D convolution https://arxiv.org/pdf/1711.11248.pdf
            # Spatial 1x3x3 conv followed by a dilated temporal 3x1x1 conv.
            conv1 = nn.Conv3d(in_filters, 2 * filters, kernel_size=(1, 3, 3),
                              dilation=(1, 1, 1), padding=(0, 1, 1), bias=False)
            conv2 = nn.Conv3d(2 * filters, filters, kernel_size=(3, 1, 1),
                              dilation=(dilation_rate, 1, 1), padding=(dilation_rate, 0, 0), bias=use_bias)
            self.layers = nn.ModuleList([conv1, conv2])
        else:
            conv = nn.Conv3d(in_filters, filters, kernel_size=3,
                             dilation=(dilation_rate, 1, 1), padding=(dilation_rate, 1, 1), bias=use_bias)
            self.layers = nn.ModuleList([conv])

    def forward(self, inputs):
        x = inputs
        for layer in self.layers:
            x = layer(x)
        return x
221
+
222
+
223
class FrameSimilarity(nn.Module):
    """Learned similarity features between each frame and its temporal neighbors."""

    def __init__(self,
                 in_filters,
                 similarity_dim=128,
                 lookup_window=101,
                 output_dim=128,
                 stop_gradient=False,  # not supported
                 use_bias=False):
        super(FrameSimilarity, self).__init__()

        if stop_gradient:
            # FIX: was `raise NotImplemented(...)` — NotImplemented is not an
            # exception class; raising it produced a TypeError.
            raise NotImplementedError("Stop gradient not implemented in Pytorch version of Transnet!")

        self.projection = nn.Linear(in_filters, similarity_dim, bias=use_bias)
        self.fc = nn.Linear(lookup_window, output_dim)

        self.lookup_window = lookup_window
        assert lookup_window % 2 == 1, "`lookup_window` must be odd integer"

    def forward(self, inputs):
        # inputs: list of [B, C_i, T, H, W] feature maps. Spatially average-pool
        # each, concatenate on channels, and transpose to [B, T, sum(C_i)].
        x = torch.cat([torch.mean(x, dim=[3, 4]) for x in inputs], dim=1)
        x = torch.transpose(x, 1, 2)

        # Project to similarity space and L2-normalize so bmm gives cosine similarity.
        x = self.projection(x)
        x = functional.normalize(x, p=2, dim=2)

        batch_size, time_window = x.shape[0], x.shape[1]
        similarities = torch.bmm(x, x.transpose(1, 2))  # [batch_size, time_window, time_window]
        similarities_padded = functional.pad(similarities, [(self.lookup_window - 1) // 2, (self.lookup_window - 1) // 2])

        # Gather, for every frame, its similarity to the lookup_window frames
        # centered on it (zero-padded at the sequence boundaries).
        batch_indices = torch.arange(0, batch_size, device=x.device).view([batch_size, 1, 1]).repeat(
            [1, time_window, self.lookup_window])
        time_indices = torch.arange(0, time_window, device=x.device).view([1, time_window, 1]).repeat(
            [batch_size, 1, self.lookup_window])
        lookup_indices = torch.arange(0, self.lookup_window, device=x.device).view([1, 1, self.lookup_window]).repeat(
            [batch_size, time_window, 1]) + time_indices

        similarities = similarities_padded[batch_indices, time_indices, lookup_indices]
        return functional.relu(self.fc(similarities))
263
+
264
+
265
class ColorHistograms(nn.Module):
    """Similarity features from per-frame 512-bin RGB color histograms."""

    def __init__(self,
                 lookup_window=101,
                 output_dim=None):
        super(ColorHistograms, self).__init__()

        self.fc = nn.Linear(lookup_window, output_dim) if output_dim is not None else None
        self.lookup_window = lookup_window
        assert lookup_window % 2 == 1, "`lookup_window` must be odd integer"

    @staticmethod
    def compute_color_histograms(frames):
        """Return L2-normalized 512-bin RGB histograms, shape [B, T, 512]."""
        frames = frames.int()

        batch_size, time_window, height, width, no_channels = frames.shape
        assert no_channels == 3
        pixels = frames.view(batch_size * time_window, height * width, 3)

        # Quantize each channel to its 3 most significant bits -> bin id in [0, 511].
        red, green, blue = pixels[:, :, 0] >> 5, pixels[:, :, 1] >> 5, pixels[:, :, 2] >> 5
        bin_ids = (red << 6) + (green << 3) + blue

        # Offset every frame's bins into its own 512-slot segment so one
        # scatter_add_ accumulates all histograms at once.
        frame_offsets = (torch.arange(0, batch_size * time_window, device=frames.device) << 9).view(-1, 1)
        flat_bins = (bin_ids + frame_offsets).view(-1)

        counts = torch.zeros(batch_size * time_window * 512, dtype=torch.int32, device=frames.device)
        counts.scatter_add_(0, flat_bins, torch.ones(len(flat_bins), dtype=torch.int32, device=frames.device))

        histograms = counts.view(batch_size, time_window, 512).float()
        return functional.normalize(histograms, p=2, dim=2)

    def forward(self, inputs):
        hists = self.compute_color_histograms(inputs)

        batch_size, time_window = hists.shape[0], hists.shape[1]
        half_window = (self.lookup_window - 1) // 2
        # [batch_size, time_window, time_window] histogram similarity matrix.
        similarities = torch.bmm(hists, hists.transpose(1, 2))
        similarities_padded = functional.pad(similarities, [half_window, half_window])

        # Gather each frame's similarity to the lookup_window frames centered
        # on it (zero-padded at the sequence boundaries).
        batch_idx = torch.arange(0, batch_size, device=hists.device).view([batch_size, 1, 1]).repeat(
            [1, time_window, self.lookup_window])
        time_idx = torch.arange(0, time_window, device=hists.device).view([1, time_window, 1]).repeat(
            [batch_size, 1, self.lookup_window])
        window_idx = torch.arange(0, self.lookup_window, device=hists.device).view([1, 1, self.lookup_window]).repeat(
            [batch_size, time_window, 1]) + time_idx

        windowed = similarities_padded[batch_idx, time_idx, window_idx]

        if self.fc is not None:
            return functional.relu(self.fc(windowed))
        return windowed