|
|
""" |
|
|
SpeakerVid dataset loader for fixed-length talking-head clips from S3/Tigris. |
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
import json |
|
|
import os |
|
|
import random |
|
|
import tempfile |
|
|
import threading |
|
|
import warnings |
|
|
from typing import Any, Dict, List, Optional, Sequence |
|
|
|
|
|
import boto3 |
|
|
from botocore.config import Config |
|
|
from botocore.exceptions import ClientError |
|
|
from decord import VideoReader |
|
|
import cv2 |
|
|
import librosa |
|
|
import numpy as np |
|
|
import torch |
|
|
from torch.utils.data import Dataset |
|
|
from transformers import Wav2Vec2Processor |
|
|
from tqdm import tqdm |
|
|
|
|
|
import ffmpeg |
|
|
from video_utils import load_video_rgb_fchw |
|
|
|
|
|
|
|
|
|
|
|
# Older librosa versions expose set_audio_backend(); when present, force the
# audioread backend so WAV decoding does not depend on the soundfile build.
try:
    if hasattr(librosa, "set_audio_backend"):
        librosa.set_audio_backend("audioread")
except Exception:
    # Best-effort: keep librosa's default backend if switching fails.
    pass


# Silence the known librosa fallback warnings that would otherwise flood logs
# when soundfile cannot open a file and audioread is used instead.
warnings.filterwarnings("ignore", message="PySoundFile failed. Trying audioread instead.")
warnings.filterwarnings(
    "ignore",
    category=FutureWarning,
    message=r"librosa\.core\.audio\.__audioread_load.*",
)

# Per-thread storage for cached boto3 clients (see _get_s3_client); boto3
# clients are not guaranteed thread-safe, so each worker thread gets its own.
_thread_local = threading.local()
|
|
|
|
|
|
|
|
def _load_defaults_from_split_clip_from_s3() -> Dict[str, Optional[str]]: |
|
|
""" |
|
|
Best-effort compatibility with split_clip_from_s3.py (same directory). |
|
|
""" |
|
|
try: |
|
|
import split_clip_from_s3 as cfg |
|
|
|
|
|
return { |
|
|
"endpoint_url": getattr(cfg, "S3_ENDPOINT_URL", None), |
|
|
"region_name": getattr(cfg, "AWS_REGION", None), |
|
|
"bucket": getattr(cfg, "S3_BUCKET", None), |
|
|
"aws_access_key_id": getattr(cfg, "AWS_ACCESS_KEY_ID", None), |
|
|
"aws_secret_access_key": getattr(cfg, "AWS_SECRET_ACCESS_KEY", None), |
|
|
} |
|
|
except Exception: |
|
|
return {} |
|
|
|
|
|
|
|
|
def _get_s3_client(
    *,
    endpoint_url: str,
    region_name: str,
    aws_access_key_id: Optional[str],
    aws_secret_access_key: Optional[str],
) -> Any:
    """Return a boto3 S3 client, cached per thread by connection parameters."""
    cache = getattr(_thread_local, "cache", None)
    if cache is None:
        cache = {}
        _thread_local.cache = cache

    key = ("s3_client", endpoint_url, region_name, aws_access_key_id, aws_secret_access_key)
    client = cache.get(key)
    if client is None:
        client_kwargs: Dict[str, Any] = {
            "service_name": "s3",
            "endpoint_url": endpoint_url,
            "region_name": region_name,
            "config": Config(signature_version="s3v4"),
        }
        # Only pass explicit credentials when both halves are present;
        # otherwise boto3 resolves credentials through its default chain.
        if aws_access_key_id and aws_secret_access_key:
            client_kwargs["aws_access_key_id"] = aws_access_key_id
            client_kwargs["aws_secret_access_key"] = aws_secret_access_key
        client = boto3.client(**client_kwargs)
        cache[key] = client
    return client
|
|
|
|
|
|
|
|
def _download_from_s3(
    bucket: str,
    key: str,
    local_path: str,
    *,
    endpoint_url: str,
    region_name: str,
    aws_access_key_id: Optional[str],
    aws_secret_access_key: Optional[str],
) -> None:
    """Fetch s3://{bucket}/{key} into local_path.

    Raises:
        RuntimeError: On any botocore ClientError (original cause chained).
    """
    s3 = _get_s3_client(
        endpoint_url=endpoint_url,
        region_name=region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    try:
        s3.download_file(bucket, key, local_path)
    except ClientError as exc:
        # Surface a uniform error type to callers; keep the botocore cause.
        raise RuntimeError(f"download failed: s3://{bucket}/{key} ({exc})") from exc
|
|
|
|
|
|
|
|
def _is_audio_silent(audio_array: np.ndarray, threshold: float = 0.001) -> bool: |
|
|
if audio_array.size == 0: |
|
|
return True |
|
|
rms = float(np.sqrt(np.mean(np.square(audio_array, dtype=np.float32)))) |
|
|
return rms < threshold |
|
|
|
|
|
|
|
|
def _read_labels_from_video(video_path: str) -> Optional[np.ndarray]:
    """Read grayscale label video back as numpy array: (T, H, W), uint8."""
    try:
        # Probe once for frame geometry so the raw byte stream can be reshaped.
        info = ffmpeg.probe(video_path)
        stream = next(s for s in info["streams"] if s["codec_type"] == "video")
        w = int(stream["width"])
        h = int(stream["height"])

        raw, _ = (
            ffmpeg.input(video_path)
            .output("pipe:", format="rawvideo", pix_fmt="gray")
            .run(capture_stdout=True, capture_stderr=True)
        )

        # One byte per pixel (gray); frames stack along axis 0.
        return np.frombuffer(raw, np.uint8).reshape((-1, h, w))
    except Exception as e:
        print(f"Error reading label video {video_path}: {e}")
        return None
|
|
|
|
|
|
|
|
def _compute_lip_bboxes( |
|
|
labels: np.ndarray, |
|
|
lip_scale: float = 1.2, |
|
|
nose_labels: Sequence[int] = (2,), |
|
|
face_labels: Sequence[int] = (1,), |
|
|
) -> List[Optional[tuple[int, int, int, int]]]: |
|
|
"""Compute per-frame mouth-region bboxes using nose + face masks, with temporal interpolation.""" |
|
|
if labels.ndim != 3: |
|
|
raise ValueError("labels must have shape (T, H, W)") |
|
|
|
|
|
T, H, W = labels.shape |
|
|
lip_scale = max(float(lip_scale), 1.0) |
|
|
|
|
|
raw_bboxes: List[Optional[tuple[int, int, int, int]]] = [None] * T |
|
|
|
|
|
for t in range(T): |
|
|
frame_labels = labels[t] |
|
|
|
|
|
nose_mask = np.isin(frame_labels, nose_labels) |
|
|
face_mask = np.isin(frame_labels, face_labels) |
|
|
|
|
|
if not np.any(nose_mask) or not np.any(face_mask): |
|
|
continue |
|
|
|
|
|
nose_ys, _ = np.where(nose_mask) |
|
|
y_top = float(nose_ys.max()) |
|
|
|
|
|
face_ys, face_xs = np.where(face_mask) |
|
|
y_bottom = float(face_ys.max()) |
|
|
x_left = float(face_xs.min()) |
|
|
x_right = float(face_xs.max()) |
|
|
|
|
|
if y_bottom <= y_top: |
|
|
continue |
|
|
|
|
|
x_min = x_left |
|
|
x_max = x_right |
|
|
y_min = y_top |
|
|
y_max = y_bottom |
|
|
|
|
|
w = x_max - x_min + 1.0 |
|
|
h = y_max - y_min + 1.0 |
|
|
cx = (x_min + x_max) / 2.0 |
|
|
cy = (y_min + y_max) / 2.0 |
|
|
|
|
|
new_w = w * lip_scale |
|
|
new_h = h * lip_scale |
|
|
|
|
|
x_min_s = int(round(cx - new_w / 2.0)) |
|
|
x_max_s = int(round(cx + new_w / 2.0)) |
|
|
y_min_s = int(round(cy - new_h / 2.0)) |
|
|
y_max_s = int(round(cy + new_h / 2.0)) |
|
|
|
|
|
x_min_s = max(0, min(x_min_s, W - 1)) |
|
|
x_max_s = max(0, min(x_max_s, W - 1)) |
|
|
y_min_s = max(0, min(y_min_s, H - 1)) |
|
|
y_max_s = max(0, min(y_max_s, H - 1)) |
|
|
|
|
|
if x_max_s <= x_min_s or y_max_s <= y_min_s: |
|
|
continue |
|
|
|
|
|
raw_bboxes[t] = (x_min_s, y_min_s, x_max_s, y_max_s) |
|
|
|
|
|
if not any(bb is not None for bb in raw_bboxes): |
|
|
return raw_bboxes |
|
|
|
|
|
coords: List[List[Optional[int]]] = [[None] * T for _ in range(4)] |
|
|
for t, bb in enumerate(raw_bboxes): |
|
|
if bb is None: |
|
|
continue |
|
|
for d in range(4): |
|
|
coords[d][t] = bb[d] |
|
|
|
|
|
for d in range(4): |
|
|
keyframes = [(t, coords[d][t]) for t in range(T) if coords[d][t] is not None] |
|
|
if not keyframes: |
|
|
continue |
|
|
|
|
|
first_idx, first_val = keyframes[0] |
|
|
for t in range(0, first_idx): |
|
|
coords[d][t] = first_val |
|
|
|
|
|
for (i, v0), (j, v1) in zip(keyframes, keyframes[1:]): |
|
|
coords[d][i] = v0 |
|
|
coords[d][j] = v1 |
|
|
gap = j - i |
|
|
if gap <= 1: |
|
|
continue |
|
|
for t in range(i + 1, j): |
|
|
alpha = (t - i) / float(gap) |
|
|
interp_val = int(round(v0 + (v1 - v0) * alpha)) |
|
|
coords[d][t] = interp_val |
|
|
|
|
|
last_idx, last_val = keyframes[-1] |
|
|
for t in range(last_idx + 1, T): |
|
|
coords[d][t] = last_val |
|
|
|
|
|
final_bboxes: List[Optional[tuple[int, int, int, int]]] = [None] * T |
|
|
for t in range(T): |
|
|
if all(coords[d][t] is not None for d in range(4)): |
|
|
final_bboxes[t] = ( |
|
|
int(coords[0][t]), |
|
|
int(coords[1][t]), |
|
|
int(coords[2][t]), |
|
|
int(coords[3][t]), |
|
|
) |
|
|
|
|
|
return final_bboxes |
|
|
|
|
|
|
|
|
def _bboxes_to_masks( |
|
|
bboxes: List[Optional[tuple[int, int, int, int]]], H: int, W: int |
|
|
) -> np.ndarray: |
|
|
"""Convert per-frame bboxes to binary masks (T, H, W) with 1 inside bbox, 0 outside.""" |
|
|
T = len(bboxes) |
|
|
masks = np.zeros((T, H, W), dtype=np.float32) |
|
|
for t, bb in enumerate(bboxes): |
|
|
if bb is None: |
|
|
continue |
|
|
x_min, y_min, x_max, y_max = bb |
|
|
y1 = int(max(0, min(y_min, H - 1))) |
|
|
y2 = int(max(0, min(y_max, H - 1))) |
|
|
x1 = int(max(0, min(x_min, W - 1))) |
|
|
x2 = int(max(0, min(x_max, W - 1))) |
|
|
if x2 <= x1 or y2 <= y1: |
|
|
continue |
|
|
masks[t, y1 : y2 + 1, x1 : x2 + 1] = 1.0 |
|
|
return masks |
|
|
|
|
|
|
|
|
def _infer_label_path(label_root: str, json_name: str) -> Optional[str]: |
|
|
"""Infer face-parse label video path from json_name.""" |
|
|
base, _ = os.path.splitext(json_name) |
|
|
video_id = base[:11] if len(base) >= 11 else base |
|
|
label_path = os.path.join(label_root, video_id, base + ".mkv") |
|
|
if os.path.exists(label_path): |
|
|
return label_path |
|
|
return None |
|
|
|
|
|
|
|
|
def _load_caption_index( |
|
|
caption_root: Optional[str], index_path: Optional[str] |
|
|
) -> Dict[str, str]: |
|
|
if not caption_root or not os.path.isdir(caption_root): |
|
|
return {} |
|
|
if index_path and os.path.isfile(index_path): |
|
|
try: |
|
|
with open(index_path, "r", encoding="utf-8") as f: |
|
|
data = json.load(f) |
|
|
if isinstance(data, dict): |
|
|
return {str(k): str(v) for k, v in data.items()} |
|
|
except Exception as e: |
|
|
print(f"⚠️ Failed to load caption index {index_path}: {e}") |
|
|
|
|
|
mapping: Dict[str, str] = {} |
|
|
json_paths: List[str] = [] |
|
|
for root, _, files in os.walk(caption_root): |
|
|
for name in files: |
|
|
if name.endswith(".json"): |
|
|
json_paths.append(os.path.join(root, name)) |
|
|
print("#Num of json_paths:",len(json_paths)) |
|
|
|
|
|
for path in tqdm(json_paths, desc="Indexing captions", unit="file"): |
|
|
name = os.path.basename(path) |
|
|
if name in mapping and mapping[name] != path: |
|
|
print(f"⚠️ Duplicate caption name {name}: {path}") |
|
|
continue |
|
|
mapping[name] = path |
|
|
|
|
|
if index_path: |
|
|
try: |
|
|
with open(index_path, "w", encoding="utf-8") as f: |
|
|
json.dump(mapping, f, ensure_ascii=True) |
|
|
except Exception as e: |
|
|
print(f"⚠️ Failed to write caption index {index_path}: {e}") |
|
|
|
|
|
return mapping |
|
|
|
|
|
|
|
|
def _load_caption_text( |
|
|
caption_path: str, fields: Sequence[str], fallback: str |
|
|
) -> str: |
|
|
try: |
|
|
with open(caption_path, "r", encoding="utf-8") as f: |
|
|
data = json.load(f) |
|
|
except Exception as e: |
|
|
print(f"⚠️ Failed to read caption {caption_path}: {e}") |
|
|
return fallback |
|
|
|
|
|
for key in fields: |
|
|
value = data.get(key) |
|
|
if isinstance(value, str) and value.strip(): |
|
|
return value.strip() |
|
|
return fallback |
|
|
|
|
|
|
|
|
def _load_existing_tsv(path: str) -> Dict[str, Dict[str, str]]: |
|
|
mapping: Dict[str, Dict[str, str]] = {} |
|
|
with open(path, "r", encoding="utf-8") as f: |
|
|
header = f.readline() |
|
|
for line in f: |
|
|
line = line.strip() |
|
|
if not line: |
|
|
continue |
|
|
parts = line.split("\t") |
|
|
if len(parts) < 3: |
|
|
continue |
|
|
json_name, mp4_key, wav_key = parts[0], parts[1], parts[2] |
|
|
mapping[json_name] = {"mp4_key": mp4_key, "wav_key": wav_key} |
|
|
return mapping |
|
|
|
|
|
|
|
|
def _extract_sync_pair( |
|
|
sync_value: Any, sync_key: str = "0", index: int = 0 |
|
|
) -> Optional[tuple[float, float]]: |
|
|
def _from_items(items: Any, idx: int) -> Optional[tuple[float, float]]: |
|
|
if not isinstance(items, list) or not items: |
|
|
return None |
|
|
if len(items) >= 2 and isinstance(items[0], (int, float)): |
|
|
return float(items[0]), float(items[1]) |
|
|
if idx >= len(items): |
|
|
return None |
|
|
item = items[idx] |
|
|
if isinstance(item, list) and len(item) >= 2: |
|
|
if isinstance(item[0], (int, float)) and isinstance(item[1], (int, float)): |
|
|
return float(item[0]), float(item[1]) |
|
|
return None |
|
|
|
|
|
if sync_value is None: |
|
|
return None |
|
|
if isinstance(sync_value, dict): |
|
|
primary = _from_items(sync_value.get(sync_key), index) |
|
|
if primary is not None: |
|
|
return primary |
|
|
for val in sync_value.values(): |
|
|
candidate = _from_items(val, 0) |
|
|
if candidate is not None: |
|
|
return candidate |
|
|
return None |
|
|
if isinstance(sync_value, list): |
|
|
return _from_items(sync_value, index) |
|
|
return None |
|
|
|
|
|
|
|
|
class SpeakerVidTalkingDataset(Dataset):
    """
    SpeakerVid talking-head dataset based on S3/Tigris clips + metainfo JSONL.
    """

    def __init__(self, config: Optional[dict] = None, split: str = "train"):
        """Resolve paths/credentials and build the filtered sample list.

        config keys (all optional): jsonl_path, existing_tsv_path, resolution,
        n_sample_frames, audio_sample_rate, audio_feature_model_id,
        caption_placeholder, use_placeholder_caption, caption_root,
        caption_index_path, caption_fields, max_trials, debug_audio,
        filter_enabled, sync_key, sync_index, sync_d_threshold,
        sync_c_threshold, label_root, plus S3 settings (endpoint_url,
        region_name, bucket, aws_access_key_id, aws_secret_access_key).

        Raises:
            RuntimeError: If no S3 credentials can be resolved from config,
                split_clip_from_s3.py, or the environment.
        """
        self.config = config or {}
        self.split = split

        self.jsonl_path = self.config.get(
            "jsonl_path",
            "/mnt/nfs/datasets/SpeakerVid-5M/metadb_code/talking_top5_syncc.jsonl",
        )
        self.existing_tsv_path = self.config.get(
            "existing_tsv_path",
            "/mnt/nfs/datasets/SpeakerVid-5M/dataprocess_code/output_top5/existing.tsv",
        )

        # Target frame size stored as [H, W]; malformed config falls back to default.
        res = self.config.get("resolution", [720, 1072])
        if isinstance(res, (list, tuple)) and len(res) == 2:
            self.sample_size = [int(res[0]), int(res[1])]
        else:
            self.sample_size = [720, 1072]
        self.n_sample_frames = int(self.config.get("n_sample_frames", 49))

        # Audio front-end: resample rate plus a Wav2Vec2 processor (downloaded
        # from the hub at construction time).
        self.sample_rate = int(self.config.get("audio_sample_rate", 16000))
        self.processor_model_id = self.config.get(
            "audio_feature_model_id", "facebook/wav2vec2-base-960h"
        )
        self.processor = Wav2Vec2Processor.from_pretrained(self.processor_model_id)

        # Captions: either a fixed placeholder string or per-clip caption json
        # files looked up through a basename index.
        self.caption_placeholder = str(
            self.config.get("caption_placeholder", "A character is talking")
        )
        self.use_placeholder_caption = bool(
            self.config.get("use_placeholder_caption", False)
        )
        self.caption_root = self.config.get(
            "caption_root", "/mnt/nfs/datasets/SpeakerVid-5M/anno/extracted"
        )
        default_index_path = os.path.join(
            os.path.dirname(__file__), "caption_index.json"
        )
        self.caption_index_path = self.config.get(
            "caption_index_path", default_index_path
        )
        # Fields probed in order; first non-empty string wins (see _load_caption_text).
        self.caption_fields = self.config.get(
            "caption_fields", ["caption2", "caption1", "ASR"]
        )
        self.max_trials = int(self.config.get("max_trials", 8))
        self.debug_audio = bool(self.config.get("debug_audio", False))
        self.filter_enabled = bool(self.config.get("filter_enabled", False))
        self.sync_key = str(self.config.get("sync_key", "0"))
        self.sync_index = int(self.config.get("sync_index", 0))

        # Used in _load_samples when filter_enabled: drop records with
        # sync_d > sync_d_threshold ...
        self.sync_d_threshold = float(self.config.get("sync_d_threshold", 6.0))

        # ... or sync_c < sync_c_threshold.
        self.sync_c_threshold = float(self.config.get("sync_c_threshold", 8.0))
        self.label_root = self.config.get(
            "label_root", "/mnt/nfs/datasets/SpeakerVid-5M/face_parse_labels"
        )

        # S3 settings resolution order: explicit config, then sibling-script
        # defaults, then environment variables, then hard-coded fallbacks.
        cfg_defaults = _load_defaults_from_split_clip_from_s3()
        self.endpoint_url = (
            self.config.get("endpoint_url")
            or cfg_defaults.get("endpoint_url")
            or os.getenv("S3_ENDPOINT_URL")
            or "https://t3.storage.dev"
        )
        self.region_name = (
            self.config.get("region_name")
            or cfg_defaults.get("region_name")
            or os.getenv("AWS_REGION")
            or "auto"
        )
        self.bucket = (
            self.config.get("bucket")
            or cfg_defaults.get("bucket")
            or os.getenv("S3_BUCKET")
            or "youtube-downloads"
        )
        self.aws_access_key_id = (
            self.config.get("aws_access_key_id")
            or cfg_defaults.get("aws_access_key_id")
            or os.getenv("AWS_ACCESS_KEY_ID")
        )
        self.aws_secret_access_key = (
            self.config.get("aws_secret_access_key")
            or cfg_defaults.get("aws_secret_access_key")
            or os.getenv("AWS_SECRET_ACCESS_KEY")
        )

        # Fail fast: every sample load needs credentials for the S3 downloads.
        if not self.aws_access_key_id or not self.aws_secret_access_key:
            raise RuntimeError(
                "Missing S3 credentials. Set AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY "
                "or keep split_clip_from_s3.py nearby."
            )

        existing = _load_existing_tsv(self.existing_tsv_path)
        if self.use_placeholder_caption:
            # Placeholder mode never reads caption files, so skip indexing.
            self.caption_index = {}
        else:
            self.caption_index = _load_caption_index(
                self.caption_root, self.caption_index_path
            )
        self.samples = self._load_samples(self.jsonl_path, existing)
        self._report_filter_stats()

        print(
            f"🎯 SpeakerVidTalkingDataset loaded: {len(self.samples)} samples "
            f"(jsonl={self.jsonl_path}, existing={self.existing_tsv_path})"
        )

    def __len__(self) -> int:
        """Number of usable clip records found at construction time."""
        return len(self.samples)

    def _load_samples(
        self, jsonl_path: str, existing: Dict[str, Dict[str, str]]
    ) -> List[Dict[str, Any]]:
        """Parse the metainfo JSONL into sample dicts.

        A record is kept only when it appears in existing.tsv, its face-parse
        label video exists on disk and (unless placeholders are used) a
        caption json can be found. When filter_enabled, records failing the
        sync thresholds are dropped.

        Side effect: accumulates _all_sync_c_scores / _all_sync_d_scores /
        _all_durations over all records, for _report_filter_stats().
        """
        samples: List[Dict[str, Any]] = []
        self._all_sync_c_scores: List[float] = []
        self._all_sync_d_scores: List[float] = []
        self._all_durations: List[float] = []
        with open(jsonl_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    record = json.loads(line)
                except json.JSONDecodeError:
                    # Skip malformed JSONL rows silently.
                    continue
                json_name = record.get("json_name")
                if not json_name or json_name not in existing:
                    continue
                keys = existing[json_name]
                sync_val = record.get("sync") or {}
                sync_pair = _extract_sync_pair(
                    sync_val, sync_key=self.sync_key, index=self.sync_index
                )
                conf_val = record.get("conf")
                try:
                    conf_score = float(conf_val) if conf_val is not None else None
                except (TypeError, ValueError):
                    conf_score = None
                # Prefer the parsed (c, d) pair; fall back to "conf" for sync-c.
                if sync_pair is not None:
                    sync_c_score, sync_d_score = sync_pair
                else:
                    sync_c_score = conf_score
                    sync_d_score = None
                duration_val = record.get("duration")
                try:
                    duration_score = float(duration_val) if duration_val is not None else None
                except (TypeError, ValueError):
                    duration_score = None

                # Pre-filter distributions, reported by _report_filter_stats().
                if sync_c_score is not None:
                    self._all_sync_c_scores.append(sync_c_score)
                if sync_d_score is not None:
                    self._all_sync_d_scores.append(sync_d_score)
                if duration_score is not None:
                    self._all_durations.append(duration_score)

                if self.filter_enabled:
                    # Drop records whose sync-d exceeds or sync-c falls below
                    # the configured thresholds (missing scores pass through).
                    if sync_d_score is not None and sync_d_score > self.sync_d_threshold:
                        continue
                    if sync_c_score is not None and sync_c_score < self.sync_c_threshold:
                        continue
                label_path = (
                    _infer_label_path(self.label_root, json_name)
                    if os.path.isdir(self.label_root)
                    else None
                )
                if label_path is None or not os.path.exists(label_path):
                    continue
                caption_path = None
                if not self.use_placeholder_caption:
                    caption_path = self.caption_index.get(json_name)
                    if caption_path is None or not os.path.exists(caption_path):
                        continue
                samples.append(
                    {
                        "json_name": json_name,
                        "mp4_key": keys["mp4_key"],
                        "wav_key": keys["wav_key"],
                        "label_path": label_path,
                        "caption_path": caption_path,
                        "sync": sync_val,
                        "sync_c": sync_c_score,
                        "sync_d": sync_d_score,
                        "dover": record.get("dover"),
                        "duration": duration_score,
                    }
                )
        return samples

    def _report_filter_stats(self) -> None:
        """Print count/min/max/mean of sync scores and total hours, for all
        records and (when filtering is on) for the kept subset."""
        def _stats(values: List[float]) -> tuple[int, float, float, float]:
            # Returns (count, min, max, mean); NaNs when the list is empty.
            if not values:
                return 0, float("nan"), float("nan"), float("nan")
            count = len(values)
            return count, min(values), max(values), sum(values) / count

        all_sync_c = getattr(self, "_all_sync_c_scores", [])
        all_sync_d = getattr(self, "_all_sync_d_scores", [])
        kept_sync_c = [
            s.get("sync_c") for s in self.samples if s.get("sync_c") is not None
        ]
        kept_sync_d = [
            s.get("sync_d") for s in self.samples if s.get("sync_d") is not None
        ]
        all_durations = getattr(self, "_all_durations", [])
        kept_durations = [
            s.get("duration") for s in self.samples if s.get("duration") is not None
        ]

        ac_count, ac_min, ac_max, ac_mean = _stats(all_sync_c)
        ad_count, ad_min, ad_max, ad_mean = _stats(all_sync_d)
        kc_count, kc_min, kc_max, kc_mean = _stats(kept_sync_c)
        kd_count, kd_min, kd_max, kd_mean = _stats(kept_sync_d)
        all_hours = sum(all_durations) / 3600.0 if all_durations else 0.0
        kept_hours = sum(kept_durations) / 3600.0 if kept_durations else 0.0

        print(
            f"📊 All sync-c stats: count={ac_count}, min={ac_min:.3f}, max={ac_max:.3f}, mean={ac_mean:.3f}"
        )
        print(
            f"📊 All sync-d stats: count={ad_count}, min={ad_min:.3f}, max={ad_max:.3f}, mean={ad_mean:.3f}"
        )
        print(f"📊 All duration: total_hours={all_hours:.2f}")
        if self.filter_enabled:
            print(
                f"📊 Filtered sync-c stats: count={kc_count}, min={kc_min:.3f}, max={kc_max:.3f}, mean={kc_mean:.3f}"
            )
            print(
                f"📊 Filtered sync-d stats: count={kd_count}, min={kd_min:.3f}, max={kd_max:.3f}, mean={kd_mean:.3f}"
            )
            print(f"📊 Filtered duration: total_hours={kept_hours:.2f}")

    def _load_clip(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        """Download one clip from S3 and assemble a full training item.

        Steps: download mp4+wav to a temp dir, read the face-parse label
        video, compute mouth bboxes, pick a random window of n_sample_frames,
        decode the video window, slice+featurize the matching audio, and build
        a per-frame mouth mask resized to the output resolution.

        Raises:
            RuntimeError: On any missing/short/unreadable asset or silent audio;
                __getitem__ catches this and retries with the next sample.
        """
        mp4_key = sample["mp4_key"]
        wav_key = sample["wav_key"]
        label_path = sample["label_path"]
        caption_path = sample["caption_path"]

        with tempfile.TemporaryDirectory(prefix="speakervid_clip_") as tmpdir:
            local_mp4 = os.path.join(tmpdir, os.path.basename(mp4_key))
            local_wav = os.path.join(tmpdir, os.path.basename(wav_key))

            _download_from_s3(
                self.bucket,
                mp4_key,
                local_mp4,
                endpoint_url=self.endpoint_url,
                region_name=self.region_name,
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
            )
            _download_from_s3(
                self.bucket,
                wav_key,
                local_wav,
                endpoint_url=self.endpoint_url,
                region_name=self.region_name,
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
            )

            labels = _read_labels_from_video(label_path)
            if labels is None or labels.ndim != 3:
                raise RuntimeError(f"failed to read labels: {label_path}")

            T_lab, H_lab, W_lab = labels.shape
            if T_lab < self.n_sample_frames:
                raise RuntimeError(
                    f"label too short: frames={T_lab}, need={self.n_sample_frames}"
                )

            bboxes = _compute_lip_bboxes(labels)
            if not any(bb is not None for bb in bboxes):
                raise RuntimeError("no valid lip bboxes in labels")

            # VideoReader is used only for frame count and fps metadata here;
            # actual frame decoding goes through load_video_rgb_fchw below.
            vr = VideoReader(local_mp4)
            total_frames = len(vr)

            # Random window start, bounded by the shorter of video/labels.
            max_start_total = min(total_frames, T_lab) - self.n_sample_frames
            if max_start_total < 0:
                raise RuntimeError(
                    f"video/label too short: video_frames={total_frames}, "
                    f"label_frames={T_lab}, need={self.n_sample_frames}"
                )
            start = random.randint(0, max_start_total) if max_start_total > 0 else 0

            # sample_size is [H, W]; load_video_rgb_fchw takes (W, H).
            H, W = self.sample_size[0], self.sample_size[1]
            video = load_video_rgb_fchw(
                local_mp4,
                (W, H),
                start=start,
                count=self.n_sample_frames,
                accurate_seek=True,
            )
            if video is None or video.shape[0] < self.n_sample_frames:
                raise RuntimeError("failed to read video frames")
            if video.shape[0] > self.n_sample_frames:
                video = video[: self.n_sample_frames]

            # fps is only used to map frame indices to audio time; fall back
            # to 25 fps when the container reports nothing usable.
            try:
                fps = float(vr.get_avg_fps())
                if not np.isfinite(fps) or fps <= 0:
                    fps = 25.0
            except Exception:
                fps = 25.0

            audio_waveform, _ = librosa.load(local_wav, sr=self.sample_rate, mono=True)

            # Convert the sampled frame window to an audio sample range.
            clip_start_time = start / fps
            clip_duration = self.n_sample_frames / fps
            clip_end_time = clip_start_time + clip_duration

            start_sample = int(max(0, clip_start_time * self.sample_rate))
            end_sample = int(max(start_sample, clip_end_time * self.sample_rate))
            end_sample = min(end_sample, audio_waveform.shape[0])

            audio_clip = audio_waveform[start_sample:end_sample]
            if audio_clip.size == 0:
                # Out-of-range slice: fall back to the whole waveform.
                audio_clip = audio_waveform
            if audio_clip.size == 0 or _is_audio_silent(audio_clip):
                raise RuntimeError("audio clip is silent or empty")

            audio_input_values = self.processor(
                audio_clip,
                sampling_rate=self.sample_rate,
                return_tensors="pt",
            ).input_values[0]

            # Rasterize the bbox window at label resolution first ...
            bboxes_window = bboxes[start : start + self.n_sample_frames]
            masks_lab = _bboxes_to_masks(bboxes_window, H_lab, W_lab)

            # ... then nearest-neighbor resize to the output resolution so the
            # mask stays binary.
            if (H_lab, W_lab) != (H, W):
                resized_masks = np.zeros(
                    (self.n_sample_frames, H, W), dtype=np.float32
                )
                for i in range(self.n_sample_frames):
                    resized_masks[i] = cv2.resize(
                        masks_lab[i],
                        (W, H),
                        interpolation=cv2.INTER_NEAREST,
                    )
                masks_lab = resized_masks

            # (T, H, W) -> (T, 1, H, W) float tensor.
            face_mask = torch.from_numpy(masks_lab).unsqueeze(1).float()

            if self.use_placeholder_caption:
                caption_text = self.caption_placeholder
            else:
                caption_text = _load_caption_text(
                    caption_path, self.caption_fields, self.caption_placeholder
                )

            return {
                "pixel_values_vid": video,
                "face_mask": face_mask,
                "caption_content": caption_text,
                "prompt": caption_text,
                "video_length": self.n_sample_frames,
                "audio_input_values": audio_input_values,
                "audio_sample_rate": self.sample_rate,
                "audio_num_samples": int(audio_clip.shape[0]),
                "json_name": sample["json_name"],
                "mp4_key": sample["mp4_key"],
                "wav_key": sample["wav_key"],
                "sync": sample.get("sync"),
                "sync_c": sample.get("sync_c"),
                "sync_d": sample.get("sync_d"),
                "dover": sample.get("dover"),
                "audio_clip": audio_clip if self.debug_audio else None,
            }

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Load sample idx, retrying up to max_trials consecutive samples when
        a clip fails to load (download error, short clip, silent audio, ...)."""
        if len(self.samples) == 0:
            raise IndexError("SpeakerVidTalkingDataset has no samples")

        num_trials = min(self.max_trials, len(self.samples))
        curr_idx = idx % len(self.samples)
        for _ in range(num_trials):
            sample = self.samples[curr_idx]
            try:
                return self._load_clip(sample)
            except Exception as e:
                # Log and advance to the next sample rather than crashing the
                # training loop on a single bad clip.
                print(f"⚠️ Error loading {sample.get('json_name')}: {e}")
                curr_idx = (curr_idx + 1) % len(self.samples)
                continue

        raise RuntimeError("No valid SpeakerVid samples found after retries.")
|
|
|