# Source: ASLLRP_utterances_results/SignX/eval/generate_feature_mapping.py
# Author: FangSen9000 — commit 9f9e779 ("The reasoning has been converted into English.")
#!/usr/bin/env python
"""
Generate a feature-to-frame mapping file for SignX inference outputs.
Usage:
python generate_feature_mapping.py <sample_dir> <video_path>
Example:
python generate_feature_mapping.py detailed_prediction_20251226_155113/sample_000 \\
eval/tiny_test_data/videos/632051.mp4
"""
import sys
import os
import json
import numpy as np
from pathlib import Path
def generate_feature_mapping(sample_dir, video_path):
    """Create the feature-to-frame mapping JSON for a given sample directory.

    Reads ``attention_weights.npy`` from *sample_dir* to determine the encoder
    feature count, queries the source video for its frame count and FPS via
    OpenCV (falling back to estimates when OpenCV is unavailable or the video
    reports no frame count), and writes ``feature_frame_mapping.json`` into
    *sample_dir* describing a uniform feature->frame mapping.

    Args:
        sample_dir: Directory containing ``attention_weights.npy``; the output
            JSON is written here as well.
        video_path: Path to the original video the features were derived from.

    Returns:
        True on success, False when inputs are missing or malformed.
    """
    sample_dir = Path(sample_dir)

    # Check if attention_weights.npy exists
    attn_file = sample_dir / "attention_weights.npy"
    if not attn_file.exists():
        print(f"Error: missing attention_weights.npy: {attn_file}")
        return False

    # Load attention weights to get feature count.
    # Handle both 2D (time, features) and 3D (time, beam, features) shapes.
    attn_weights = np.load(attn_file)
    if attn_weights.ndim == 2:
        feature_count = attn_weights.shape[1]  # (time, features) - inference mode
    elif attn_weights.ndim == 3:
        feature_count = attn_weights.shape[2]  # (time, beam, features) - beam search
    else:
        print(f"Error: unexpected attention_weights shape: {attn_weights.shape}")
        return False
    if feature_count == 0:
        # Guard: the uniform-mapping loop and downsampling_ratio below divide
        # by feature_count, so an empty feature axis must be rejected here.
        print(f"Error: attention_weights has an empty feature axis: {attn_weights.shape}")
        return False

    print(f"Feature count: {feature_count}")

    # Get original frame count from video
    try:
        import cv2
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            print(f"Error: failed to open video file: {video_path}")
            return False
        original_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        print(f"Original frames: {original_frame_count}, FPS: {fps}")
        if original_frame_count <= 0:
            # Some containers/streams report no frame count; fall back to the
            # same estimate used when OpenCV is missing so the mapping is not
            # degenerate (all entries 0-0).
            print("Warning: video reports no frame count, falling back to estimates")
            original_frame_count = feature_count * 3  # default 3x downsampling
    except ImportError:
        print("Warning: OpenCV not available, falling back to estimates")
        # Assume 30 fps and approximate the frame count from features
        original_frame_count = feature_count * 3  # default 3x downsampling
        fps = 30.0

    # Calculate uniform mapping: feature i covers frames [start, end)
    frame_mapping = []
    for feat_idx in range(feature_count):
        start_frame = int(feat_idx * original_frame_count / feature_count)
        end_frame = int((feat_idx + 1) * original_frame_count / feature_count)
        frame_mapping.append({
            "feature_index": feat_idx,
            "frame_start": start_frame,
            "frame_end": end_frame,
            "frame_count": end_frame - start_frame,
        })

    # Save mapping
    mapping_data = {
        "original_frame_count": original_frame_count,
        "feature_count": feature_count,
        "downsampling_ratio": original_frame_count / feature_count,
        "fps": fps,
        "mapping": frame_mapping,
    }
    output_file = sample_dir / "feature_frame_mapping.json"
    with open(output_file, 'w') as f:
        json.dump(mapping_data, f, indent=2)

    print(f"\n✓ Mapping file written: {output_file}")
    print(f" Original frames: {original_frame_count}")
    print(f" Feature count: {feature_count}")
    print(f" Downsampling ratio: {mapping_data['downsampling_ratio']:.2f}x")

    # Print a few sample mappings so the console output is self-explanatory
    print("\nSample mappings:")
    for i in range(min(3, len(frame_mapping))):
        mapping = frame_mapping[i]
        print(f" Feature {mapping['feature_index']}: frames {mapping['frame_start']}-{mapping['frame_end']} "
              f"({mapping['frame_count']} frames)")
    if len(frame_mapping) > 3:
        print(" ...")
        mapping = frame_mapping[-1]
        print(f" Feature {mapping['feature_index']}: frames {mapping['frame_start']}-{mapping['frame_end']} "
              f"({mapping['frame_count']} frames)")
    return True
if __name__ == "__main__":
    # CLI entry point: expect exactly <sample_dir> and <video_path>.
    args = sys.argv[1:]
    if len(args) != 2:
        print("Usage: python generate_feature_mapping.py <sample_dir> <video_path>")
        print("\nExample:")
        print(" python generate_feature_mapping.py detailed_prediction_20251226_155113/sample_000 \\")
        print(" eval/tiny_test_data/videos/632051.mp4")
        sys.exit(1)

    sample_dir, video_path = args

    # Fail fast on missing inputs before doing any real work.
    for label, path in (("sample directory", sample_dir), ("video file", video_path)):
        if not os.path.exists(path):
            print(f"Error: {label} not found: {path}")
            sys.exit(1)

    # Exit code mirrors the boolean result: 0 on success, 1 on failure.
    sys.exit(0 if generate_feature_mapping(sample_dir, video_path) else 1)