# temp_dataset/dataprocess_code/test_speakervid_loader.py
# Uploaded by xingzhaohu — commit 4b3a024 ("Initial upload").
import os
import subprocess
import cv2
import numpy as np
from tqdm import tqdm
from speakervid_data_talkinghead import SpeakerVidTalkingDataset
# S3-compatible object-storage settings used by the dataset loader.
# SECURITY NOTE(review): the fallback credentials below are committed in plain
# text and must be considered leaked — rotate them and drop the literals.
# Environment variables, when set, take precedence over the fallbacks.
S3_ENDPOINT_URL = os.getenv("S3_ENDPOINT_URL", "https://t3.storage.dev")
AWS_ACCESS_KEY_ID = os.getenv(
    "AWS_ACCESS_KEY_ID", "tid_cqKPLHboixMUUQxq_ImANLFwrehWmWZHlEaPZXzXNbKxf_fugg"
)
AWS_SECRET_ACCESS_KEY = os.getenv(
    "AWS_SECRET_ACCESS_KEY",
    "tsec_CXLclBpmOD2blVqdL+smpI52cOxQiXs-pH-INnfU6yfhc1MAajUTpI7xWO+5YAyLwyXjpq",
)
def _visualize_face_mask(video, face_mask, out_path, fps=25, alpha=0.5) -> None:
    """Blend a binary face mask over a video and write the result as MP4.

    Args:
        video: float tensor of frames in [-1, 1], shaped (T, C, H, W) —
            inferred from the (0, 2, 3, 1) permute below. TODO confirm RGB
            channel order against the dataset loader.
        face_mask: tensor shaped (T, 1, h, w); values > 0.5 count as face.
        out_path: destination ``.mp4`` path.
        fps: output frame rate.
        alpha: blend weight of the mask overlay (cv2.addWeighted).

    Raises:
        RuntimeError: if the cv2.VideoWriter cannot be opened for out_path.
    """
    # [-1, 1] float -> [0, 255] uint8, channels-last. The .contiguous() call is
    # a bug fix: the permuted view is not C-contiguous, and OpenCV rejects
    # strided arrays ("Layout of the input array is incompatible with cv::Mat").
    frames = (
        ((video + 1.0) * 127.5)
        .clamp(0, 255)
        .byte()
        .permute(0, 2, 3, 1)
        .contiguous()
        .cpu()
        .numpy()
    )
    mask = face_mask.squeeze(1).cpu().numpy()
    h, w = frames.shape[1], frames.shape[2]
    # Upsample the mask to frame resolution if needed; nearest keeps it binary.
    if mask.shape[1] != h or mask.shape[2] != w:
        resized = np.zeros((mask.shape[0], h, w), dtype=np.float32)
        for i in range(mask.shape[0]):
            resized[i] = cv2.resize(mask[i], (w, h), interpolation=cv2.INTER_NEAREST)
        mask = resized
    writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)
    )
    # cv2.VideoWriter fails silently on a bad path/codec; surface it instead.
    if not writer.isOpened():
        raise RuntimeError(f"could not open VideoWriter for {out_path!r}")
    try:
        for i, frame in enumerate(frames):
            overlay = np.zeros_like(frame)
            # Mask painted into channel index 2 of the (presumed RGB) frame.
            overlay[:, :, 2] = (mask[i] > 0.5).astype(np.uint8) * 255
            blended = cv2.addWeighted(frame, 1.0, overlay, alpha, 0.0)
            writer.write(cv2.cvtColor(blended, cv2.COLOR_RGB2BGR))
    finally:
        # Always release so a partially written file is still finalized.
        writer.release()
def main(*, num_samples: int = 50, save_media: bool = False) -> None:
    """Smoke-test SpeakerVidTalkingDataset: load samples and print their shapes.

    Args:
        num_samples: maximum number of samples to iterate.
        save_media: when True, additionally dump each sample's video, face-mask
            overlay, and (if present) audio + muxed A/V under ./visual_tmp.
    """
    config = {
        "jsonl_path": "/mnt/nfs/datasets/SpeakerVid-5M/metadb_code/talking_top15_syncc.jsonl",
        "existing_tsv_path": "/mnt/nfs/datasets/SpeakerVid-5M/dataprocess_code/output_top15/existing.tsv",
        "audio_feature_model_id": "facebook/wav2vec2-base-960h",
        "filter_enabled": True,
        "sync_d_threshold": 10,   # Sync-D (lower is better)
        "sync_c_threshold": 6.5,  # Sync-C (higher is better)
        "debug_audio": False,
        "use_placeholder_caption": True,
    }
    # Prefer credentials from the environment; fall back to module constants.
    config["aws_access_key_id"] = os.getenv("AWS_ACCESS_KEY_ID", AWS_ACCESS_KEY_ID)
    config["aws_secret_access_key"] = os.getenv("AWS_SECRET_ACCESS_KEY", AWS_SECRET_ACCESS_KEY)
    dataset = SpeakerVidTalkingDataset(config=config)
    out_dir = os.path.join(os.getcwd(), "visual_tmp")
    os.makedirs(out_dir, exist_ok=True)
    for idx in tqdm(range(min(num_samples, len(dataset)))):
        sample = dataset[idx]
        print("json_name:", sample.get("json_name"))
        print("pixel_values_vid shape:", tuple(sample["pixel_values_vid"].shape))
        print("audio_input_values shape:", tuple(sample["audio_input_values"].shape))
        print("caption:", sample.get("caption_content"))
        if save_media:
            _dump_sample_media(sample, out_dir, idx)


def _dump_sample_media(sample, out_dir: str, idx: int) -> None:
    """Write one sample's video, mask overlay, and muxed audio/video to disk."""
    video = sample["pixel_values_vid"]
    if sample.get("face_mask") is not None:
        mask_path = os.path.join(out_dir, f"sample_{idx:04d}_mask.mp4")
        _visualize_face_mask(video, sample["face_mask"], mask_path)
    # [-1, 1] float -> uint8 channels-last; .contiguous() is required because
    # OpenCV rejects the strided array a permute produces.
    frames = (
        ((video + 1.0) * 127.5)
        .clamp(0, 255)
        .byte()
        .permute(0, 2, 3, 1)
        .contiguous()
        .cpu()
        .numpy()
    )
    out_path = os.path.join(out_dir, f"sample_{idx:04d}.mp4")
    h, w = frames.shape[1], frames.shape[2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), 25, (w, h))
    try:
        for frame in frames:
            writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        writer.release()
    audio_clip = sample.get("audio_clip")
    if audio_clip is None:
        return
    audio_path = os.path.join(out_dir, f"sample_{idx:04d}.wav")
    audio_sr = int(sample.get("audio_sample_rate", 16000))
    audio_clip = np.asarray(audio_clip, dtype=np.float32)
    try:
        import soundfile as sf

        sf.write(audio_path, audio_clip, audio_sr)
    except Exception:
        # soundfile may be missing; scipy is an acceptable fallback writer.
        from scipy.io import wavfile

        wavfile.write(audio_path, audio_sr, audio_clip)
    mux_path = os.path.join(out_dir, f"sample_{idx:04d}_av.mp4")
    # List-form argv (shell=False) avoids shell-injection via file names.
    subprocess.run(
        [
            "ffmpeg",
            "-y",
            "-i",
            out_path,
            "-i",
            audio_path,
            "-c:v",
            "copy",
            "-c:a",
            "aac",
            "-shortest",
            mux_path,
        ],
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
# Run the loader smoke test when executed as a script.
if __name__ == "__main__":
    main()