|
|
import os |
|
|
import subprocess |
|
|
|
|
|
import cv2 |
|
|
import numpy as np |
|
|
|
|
|
from tqdm import tqdm |
|
|
|
|
|
from speakervid_data_talkinghead import SpeakerVidTalkingDataset |
|
|
|
|
|
# Object-storage connection settings.
#
# SECURITY: these credentials were previously hard-coded in source and must be
# treated as compromised — rotate them and supply replacements via the
# environment. Environment variables take precedence; the literals remain only
# as fallbacks so existing invocations keep working until rotation.
S3_ENDPOINT_URL = os.environ.get("S3_ENDPOINT_URL", "https://t3.storage.dev")

AWS_ACCESS_KEY_ID = os.environ.get(
    "AWS_ACCESS_KEY_ID",
    "tid_cqKPLHboixMUUQxq_ImANLFwrehWmWZHlEaPZXzXNbKxf_fugg",
)

AWS_SECRET_ACCESS_KEY = os.environ.get(
    "AWS_SECRET_ACCESS_KEY",
    "tsec_CXLclBpmOD2blVqdL+smpI52cOxQiXs-pH-INnfU6yfhc1MAajUTpI7xWO+5YAyLwyXjpq",
)
|
|
|
|
|
|
|
|
def _visualize_face_mask(video, face_mask, out_path, fps=25, alpha=0.5) -> None:
    """Write an mp4 of *video* with the thresholded *face_mask* blended on top.

    Args:
        video: float tensor in [-1, 1] — assumed shape (T, C, H, W), inferred
            from the permute below; TODO confirm with callers.
        face_mask: tensor of shape (T, 1, h, w); values > 0.5 count as face.
        out_path: destination .mp4 file path.
        fps: frame rate of the written video.
        alpha: overlay opacity passed to ``cv2.addWeighted``.

    Raises:
        RuntimeError: if the video writer cannot be opened for *out_path*.
    """
    # Denormalize [-1, 1] -> uint8 and reorder to (T, H, W, C) numpy frames.
    frames = (
        ((video + 1.0) * 127.5)
        .clamp(0, 255)
        .byte()
        .permute(0, 2, 3, 1)
        .cpu()
        .numpy()
    )
    mask = face_mask.squeeze(1).cpu().numpy()
    h, w = frames.shape[1], frames.shape[2]
    # Resize masks to frame resolution if they differ; nearest-neighbor keeps
    # the mask effectively binary (no interpolated in-between values).
    if mask.shape[1] != h or mask.shape[2] != w:
        resized = np.zeros((mask.shape[0], h, w), dtype=np.float32)
        for i in range(mask.shape[0]):
            resized[i] = cv2.resize(mask[i], (w, h), interpolation=cv2.INTER_NEAREST)
        mask = resized

    writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)
    )
    # cv2.VideoWriter fails silently (writes nothing) when it cannot open the
    # target; surface that as an explicit error instead of a zero-byte file.
    if not writer.isOpened():
        raise RuntimeError(f"failed to open video writer for {out_path}")
    try:
        for i, frame in enumerate(frames):
            # Paint the thresholded mask into channel index 2 at full
            # intensity, then alpha-blend onto the frame.
            overlay = np.zeros_like(frame)
            overlay[:, :, 2] = (mask[i] > 0.5).astype(np.uint8) * 255
            blended = cv2.addWeighted(frame, 1.0, overlay, alpha, 0.0)
            writer.write(cv2.cvtColor(blended, cv2.COLOR_RGB2BGR))
    finally:
        # Always release so the container is finalized even if a write raises.
        writer.release()
|
|
def main() -> None:
    """Smoke-test the dataset: build it, then load and print a few samples."""
    config = {
        "jsonl_path": "/mnt/nfs/datasets/SpeakerVid-5M/metadb_code/talking_top15_syncc.jsonl",
        "existing_tsv_path": "/mnt/nfs/datasets/SpeakerVid-5M/dataprocess_code/output_top15/existing.tsv",
        "audio_feature_model_id": "facebook/wav2vec2-base-960h",
        "filter_enabled": True,
        "sync_d_threshold": 10,
        "sync_c_threshold": 6.5,
        "debug_audio": False,
        "use_placeholder_caption": True,
        # Storage credentials come from the module-level constants.
        "aws_access_key_id": AWS_ACCESS_KEY_ID,
        "aws_secret_access_key": AWS_SECRET_ACCESS_KEY,
    }

    dataset = SpeakerVidTalkingDataset(config=config)

    # Scratch directory for any visual debugging output.
    out_dir = os.path.join(os.getcwd(), "visual_tmp")
    os.makedirs(out_dir, exist_ok=True)

    # Inspect at most the first 50 samples.
    sample_count = min(50, len(dataset))
    for idx in tqdm(range(sample_count)):
        sample = dataset[idx]
        print("json_name:", sample.get("json_name"))
        print("pixel_values_vid shape:", tuple(sample["pixel_values_vid"].shape))
        print("audio_input_values shape:", tuple(sample["audio_input_values"].shape))
        print("caption:", sample.get("caption_content"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Entry point when the module is executed directly as a script.
if __name__ == "__main__":
    main()
|
|
|