#!/usr/bin/env python3
"""
Pose ViT dimensional importance analysis.

This script mimics the pose-assist (video2pose → pad-to-2048) pipeline used in Sign-X:
  1. Load the ViT-based video2pose encoder and the PadMatch+LayerNorm projection from
     the video2text checkpoint (e.g., video2text_checkpoint_epoch_14.pth).
  2. Sample frames from a given video, extract per-frame pose representations,
     and project them to 2048 dimensions.
  3. Compute simple importance scores (mean absolute activation per dimension),
     then export CSV/plots/report summarising the dominant pose dimensions.
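
Example invocation (script name and video path are illustrative):
    python pose_vit_dim_analysis.py \
        --video demo/sign_clip.mp4 \
        --checkpoint smkd/pretrained/video2text_checkpoint_epoch_14.pth \
        --output-dir pose_vit_feature_analysis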
"""

import argparse
import json
import os
import pickle
from pathlib import Path

import cv2
import matplotlib

matplotlib.use("Agg")  # non-interactive backend; must be selected before importing pyplot
import matplotlib.pyplot as plt  # noqa: E402
import numpy as np  # noqa: E402
import torch  # noqa: E402
import torch.nn as nn  # noqa: E402
from PIL import Image  # noqa: E402
from torchvision import transforms  # noqa: E402
try:
    import timm  # noqa: E402
except ImportError as exc:
    raise ImportError("timm is required for ViT backbone. Please install timm.") from exc


INPUT_DIMS = {
    "dwpose": 384,
    "mediapipe_pose": 258,
    "primedepth_depth": 576,
    "sapiens_segmentation": 576,
    "smplerx": 165,
}
POSE_TYPE_ORDER = ["dwpose", "mediapipe_pose", "primedepth_depth", "sapiens_segmentation", "smplerx"]
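# Concatenated pose width: 384 + 258 + 576 + 576 + 165 = 1959; PadMatch pads it to 2048.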


class CodeBook:  # noqa: D401 - Dummy placeholder so torch.load can unpickle checkpoints.
    """Placeholder CodeBook to satisfy torch.load when checkpoints store this object."""

    def __init__(self, *args, **kwargs):
        self.vocab_size = kwargs.get("vocab_size", 0)


class Video2Pose(nn.Module):
    """Minimal replica of the pose-assist encoder (ViT + temporal attention + per-type projection)."""

    def __init__(self, input_dims):
        super().__init__()
        self.backbone = timm.create_model("vit_base_patch16_224", pretrained=True, num_classes=0)
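        # nn.MultiheadAttention defaults to batch_first=False, so forward() feeds
        # it sequences shaped [F, B, 768].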
        self.temporal_attention = nn.MultiheadAttention(768, num_heads=8)
        self.temporal_norm = nn.LayerNorm(768)
        self.projections = nn.ModuleDict({pose: nn.Linear(768, dim) for pose, dim in input_dims.items()})

    def forward(self, x):
        # x: [B, F, 3, H, W]
        B, F, C, H, W = x.shape
        features = self.backbone(x.view(B * F, C, H, W))  # [B*F, 768]
        features = features.view(B, F, -1).transpose(0, 1)  # [F, B, 768]
        attended, _ = self.temporal_attention(features, features, features)
        attended = self.temporal_norm(attended).transpose(0, 1)  # [B, F, 768]
        return {pose: proj(attended) for pose, proj in self.projections.items()}  # each [B, F, dim]


class PadMatch(nn.Module):
    """Pad features to hidden_dim and apply LayerNorm (weights loaded from checkpoint)."""

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.pad = hidden_dim - input_dim
        if self.pad < 0:
            raise ValueError(f"hidden_dim {hidden_dim} must be >= input_dim {input_dim}")
        self.layer_norm = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        if self.pad > 0:
            x = nn.functional.pad(x, (0, self.pad), "constant", 0.0)
        return self.layer_norm(x)


def parse_args():
    parser = argparse.ArgumentParser(description="Pose ViT dimensional analysis (video2pose → 2048D).")
    parser.add_argument("--video", required=True, help="Path to input video (.mp4).")
    parser.add_argument(
        "--checkpoint",
        default="smkd/pretrained/video2text_checkpoint_epoch_14.pth",
        help="Path to video2text checkpoint containing video2pose weights.",
    )
    parser.add_argument(
        "--output-dir",
        default="pose_vit_feature_analysis",
        help="Directory to store feature dumps, plots, and summary.",
    )
    parser.add_argument("--num-frames", type=int, default=32, help="Frames sampled uniformly from the video.")
    parser.add_argument("--device", choices=["cuda", "cpu"], default="cuda", help="Torch device preference.")
    parser.add_argument("--topk", type=int, default=32, help="Number of top dimensions to visualise.")
    return parser.parse_args()


def prepare_device(pref: str) -> torch.device:
    if pref == "cuda" and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def ensure_dir(path: str) -> Path:
    dst = Path(path)
    dst.mkdir(parents=True, exist_ok=True)
    return dst


def load_checkpoint(checkpoint_path: str, device: torch.device):
    """Load checkpoint with safe unpickling fallback."""
    try:
        ckpt = torch.load(checkpoint_path, map_location=device, weights_only=True)
    except (TypeError, AttributeError, pickle.UnpicklingError):
        # Older torch lacks the weights_only kwarg (TypeError); newer torch refuses
        # custom classes under weights_only=True (UnpicklingError). Fall back to full
        # unpickling; guard add_safe_globals, which only exists on recent torch.
        if hasattr(torch.serialization, "add_safe_globals"):
            torch.serialization.add_safe_globals([CodeBook])
        ckpt = torch.load(checkpoint_path, map_location=device, weights_only=False)

    if isinstance(ckpt, dict) and "model_state_dict" in ckpt:
        return ckpt["model_state_dict"]
    return ckpt


def load_video2pose_weights(model: Video2Pose, state_dict):
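    # Encoder weights are expected under a "video2pose." prefix in the checkpoint;
    # strict=False tolerates partial matches and the differences are reported below.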
    sub_state = {k.replace("video2pose.", "", 1): v for k, v in state_dict.items() if k.startswith("video2pose.")}
    missing, unexpected = model.load_state_dict(sub_state, strict=False)
    if missing:
        print(f"[WARN] Missing video2pose keys ({len(missing)}): {missing[:5]}...")
    if unexpected:
        print(f"[WARN] Unexpected video2pose keys ({len(unexpected)}): {unexpected[:5]}...")


def load_padmatch_weights(projector: PadMatch, state_dict):
    sub_state = {
        k.replace("pose2text.dim_match.", "", 1): v
        for k, v in state_dict.items()
        if k.startswith("pose2text.dim_match.")
    }
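    # Key names "1.weight"/"1.bias" suggest dim_match was saved as an nn.Sequential
    # with its LayerNorm at index 1; only those parameters are restored here.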
    ln_state = {}
    if "1.weight" in sub_state:
        ln_state["weight"] = sub_state["1.weight"]
    if "1.bias" in sub_state:
        ln_state["bias"] = sub_state["1.bias"]
    if ln_state:
        projector.layer_norm.load_state_dict(ln_state, strict=False)
    if "1.weight" not in sub_state or "1.bias" not in sub_state:
        print("[WARN] LayerNorm weights not found in checkpoint; using default initialisation.")


def load_and_process_video(video_path: str, num_frames: int):
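    """Uniformly sample num_frames frames as normalised 224x224 tensors.

    If some frames fail to decode, the last successful frame is repeated so the
    output is always [num_frames, 3, 224, 224].
    """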
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise RuntimeError(f"Unable to open video {video_path}")

    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if total <= 0:
        raise RuntimeError(f"No frames found in video {video_path}")

    indices = np.linspace(0, max(total - 1, 0), num=num_frames, dtype=np.int32)
    transform = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ]
    )

    frames = []
    for idx in indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
        ok, frame = cap.read()
        if not ok:
            continue
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(transform(Image.fromarray(frame)))

    cap.release()

    if not frames:
        raise RuntimeError(f"Failed to decode frames from {video_path}")

    while len(frames) < num_frames:
        frames.append(frames[-1].clone())

    return torch.stack(frames[:num_frames], dim=0)  # [F, 3, 224, 224]


def compute_importance(pose_2048: torch.Tensor):
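    """Mean absolute activation per dimension, averaged over the batch and frame axes of a [B, F, D] tensor."""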
    with torch.no_grad():
        scores = pose_2048.abs().mean(dim=(0, 1)).cpu().numpy()
    normalized = scores / (scores.max() + 1e-8)
    return scores, normalized


def plot_top_dimensions(scores, top_indices, output_path):
    plt.figure(figsize=(max(8, len(top_indices) * 0.35), 4))
    plt.bar(range(len(top_indices)), scores[top_indices], color="#1f77b4")
    plt.xticks(range(len(top_indices)), [str(i) for i in top_indices], rotation=60)
    plt.ylabel("Mean |activation|")
    plt.xlabel("Dimension")
    plt.title("Top pose dimensions")
    plt.tight_layout()
    plt.savefig(output_path, dpi=240)
    plt.close()


def plot_heatmap(normalized_scores, output_path):
    # Only use the actual pose dimensions (excluding padding)
    total_pose_dims = sum(INPUT_DIMS.values())  # 1959
    rows = 32
    cols = int(np.ceil(total_pose_dims / rows))  # 62 columns needed

    # Only use real pose features, not padding
    heat_data = normalized_scores[:total_pose_dims]
    # Pad to fill the rectangle if needed
    needed = rows * cols
    if len(heat_data) < needed:
        heat_data = np.pad(heat_data, (0, needed - len(heat_data)), constant_values=0)
    heat = heat_data.reshape(rows, cols)

    # Calculate pose type boundaries
    boundaries = []
    cumsum = 0
    for pose_type in POSE_TYPE_ORDER:
        cumsum += INPUT_DIMS[pose_type]
        boundaries.append(cumsum)
    # boundaries = [384, 642, 1218, 1794, 1959]

    # Adjust figure size to reduce right-side whitespace
    fig, ax = plt.subplots(figsize=(12, 6))
    im = ax.imshow(heat, aspect="auto", cmap="magma", extent=[0, cols, rows, 0])

    # Set limits to avoid extra space
    ax.set_xlim(0, cols)
    ax.set_ylim(rows, 0)  # Invert y-axis

    # Draw red lines to separate pose types
    # Convert dimension index to (row, col) in the heatmap
    for boundary_dim in boundaries:
        row = boundary_dim // cols
        col = boundary_dim % cols

        if col == 0:
            # Boundary is at the start of a row, draw horizontal line
            ax.axhline(y=row, color='red', linewidth=1.2, linestyle='-', alpha=0.9)
        else:
            # Boundary falls mid-row: draw a step separator. Cells [0, col) of this
            # row belong to the previous pose type, cells [col, cols) to the next.
            # Vertical segment at the split column
            ax.plot([col, col], [row, row + 1],
                   color='red', linewidth=1.2, linestyle='-', alpha=0.9)
            # Bottom edge under the previous type's cells in this row
            ax.plot([0, col], [row + 1, row + 1],
                   color='red', linewidth=1.2, linestyle='-', alpha=0.9)
            # Top edge above the next type's cells in this row
            ax.plot([col, cols], [row, row],
                   color='red', linewidth=1.2, linestyle='-', alpha=0.9)

    # Add text labels for pose types at region centers (1.5x size)
    pose_labels = POSE_TYPE_ORDER
    pose_boundaries = [0] + boundaries
    for i, pose_name in enumerate(pose_labels):
        start_dim = pose_boundaries[i]
        end_dim = pose_boundaries[i + 1] - 1  # Last dimension in region

        # Calculate geometric center for regions spanning multiple rows
        start_row = start_dim // cols
        start_col = start_dim % cols
        end_row = end_dim // cols
        end_col = end_dim % cols

        region_size = pose_boundaries[i + 1] - start_dim

        # Calculate center row
        center_row = (start_row + end_row) / 2.0

        # Calculate center col based on region shape
        if start_row == end_row:
            # Single row: simple average
            center_col = (start_col + end_col) / 2.0
        else:
            # Multi-row: calculate weighted average col
            total_cells = 0
            weighted_col = 0

            # First partial row
            first_row_cells = cols - start_col
            weighted_col += (start_col + cols - 1) / 2.0 * first_row_cells
            total_cells += first_row_cells

            # Full middle rows
            middle_rows = end_row - start_row - 1
            if middle_rows > 0:
                weighted_col += ((cols - 1) / 2.0) * cols * middle_rows  # mean cell index of a full row
                total_cells += cols * middle_rows

            # Last partial row
            last_row_cells = end_col + 1
            weighted_col += (end_col / 2.0) * last_row_cells
            total_cells += last_row_cells

            center_col = weighted_col / total_cells

        # Add label if there's enough space
        if region_size >= 50:
            ax.text(center_col, center_row, pose_name,
                   fontsize=14, ha='center', va='center',
                   color='white', weight='bold',
                   bbox=dict(boxstyle='round,pad=0.3', facecolor='black', alpha=0.5))

    # Set labels and title with 2x font size
    ax.set_xlabel("Chunk index", fontsize=20)
    ax.set_ylabel("Row", fontsize=20)
    ax.set_title("Pose dimension importance heatmap", fontsize=24)

    # Set tick label size to 2x
    ax.tick_params(axis='both', which='major', labelsize=20)

    # Add colorbar with larger font, shrink to reduce width
    cbar = plt.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
    cbar.ax.tick_params(labelsize=20)
    cbar.set_label("Normalized importance", fontsize=20)

    plt.tight_layout()
    plt.savefig(output_path, dpi=240, bbox_inches='tight')
    # Also save as PDF
    pdf_path = output_path.parent / (output_path.stem + ".pdf")
    plt.savefig(pdf_path, bbox_inches='tight')
    plt.close()


def plot_cumulative(scores, output_path):
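    # Sort scores in descending order and plot the running fraction of total
    # activation energy captured by the top-k dimensions.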
    sorted_scores = np.sort(scores)[::-1]
    coverage = np.cumsum(sorted_scores) / sorted_scores.sum()
    plt.figure(figsize=(8, 4))
    plt.plot(np.arange(1, len(sorted_scores) + 1), coverage, color="#ff7f0e")
    plt.xlabel("Top-k dimensions")
    plt.ylabel("Cumulative coverage")
    plt.grid(alpha=0.3)
    plt.tight_layout()
    plt.savefig(output_path, dpi=240)
    plt.close()
    return coverage


def save_csv(scores, normalized, output_path):
    with open(output_path, "w", encoding="utf-8") as handle:
        handle.write("dimension,score,normalized\n")
        for idx, (score, norm) in enumerate(zip(scores, normalized)):
            handle.write(f"{idx},{score:.8f},{norm:.6f}\n")


def write_report(video, checkpoint, scores, normalized, coverage, top_indices, output_path):
    with open(output_path, "w", encoding="utf-8") as handle:
        handle.write("Pose ViT dimensional analysis\n")
        handle.write("=" * 60 + "\n\n")
        handle.write(f"Video      : {video}\n")
        handle.write(f"Checkpoint : {checkpoint}\n")
        handle.write(f"Total dims : {scores.shape[0]}\n\n")
        handle.write("Top dimensions:\n")
        for rank, dim_idx in enumerate(top_indices, 1):
            handle.write(
                f"{rank:02d}. dim {dim_idx:04d} | score={scores[dim_idx]:.6f} "
                f"| normalized={normalized[dim_idx]:.4f}\n"
            )

        handle.write("\nCoverage milestones:\n")
        for pct in (0.25, 0.5, 0.9):
            required = np.argmax(coverage >= pct) + 1
            handle.write(f"  - Top {required:4d} dims explain {pct:.0%} of energy\n")

        handle.write("\nScores = mean absolute activation over frames/batch.\n")


def main():
    args = parse_args()
    video_abs = os.path.abspath(args.video)
    ckpt_abs = os.path.abspath(args.checkpoint)
    out_dir = ensure_dir(args.output_dir)

    if not os.path.exists(video_abs):
        raise FileNotFoundError(f"Video not found: {video_abs}")
    if not os.path.exists(ckpt_abs):
        raise FileNotFoundError(f"Checkpoint not found: {ckpt_abs}")

    device = prepare_device(args.device)
    print(f"[INFO] Using device: {device}")

    state_dict = load_checkpoint(ckpt_abs, device)
    video2pose = Video2Pose(INPUT_DIMS).to(device)
    load_video2pose_weights(video2pose, state_dict)
    projector = PadMatch(sum(INPUT_DIMS.values()), 2048).to(device)
    load_padmatch_weights(projector, state_dict)

    frames = load_and_process_video(video_abs, args.num_frames).unsqueeze(0).to(device)  # [1, F, 3, 224, 224]

    with torch.no_grad():
        pose_dict = video2pose(frames)
        pose_concat = torch.cat([pose_dict[ptype] for ptype in POSE_TYPE_ORDER if ptype in pose_dict], dim=-1)
        B, F, D = pose_concat.shape
        pose_flat = pose_concat.reshape(B * F, D)
        pose_2048 = projector(pose_flat).view(B, F, -1)
        np.save(out_dir / "pose_2048.npy", pose_2048.cpu().numpy())

    scores, normalized = compute_importance(pose_2048)
    save_csv(scores, normalized, out_dir / "dimension_scores.csv")

    topk = min(args.topk, scores.shape[0])
    top_indices = np.argsort(scores)[::-1][:topk]
    plot_top_dimensions(scores, top_indices, out_dir / "top_dimensions.png")
    plot_heatmap(normalized, out_dir / "dimension_heatmap.png")
    coverage = plot_cumulative(scores, out_dir / "cumulative_importance.png")
    write_report(video_abs, ckpt_abs, scores, normalized, coverage, top_indices, out_dir / "analysis_report.txt")

    meta = {
        "video": video_abs,
        "checkpoint": ckpt_abs,
        "num_frames": args.num_frames,
        "device": str(device),
        "top_dimensions": top_indices.tolist(),
    }
    with open(out_dir / "metadata.json", "w", encoding="utf-8") as handle:
        json.dump(meta, handle, indent=2)

    print(f"[INFO] Analysis complete. Artifacts saved to: {out_dir}")


if __name__ == "__main__":
    main()