Add files using upload-large-folder tool
Browse files- batch_eval-Copy1.py +135 -0
- batch_eval.py +145 -0
- config/__init__.py +0 -0
- config/base_config.yaml +66 -0
- config/dpo_base_config.yaml +88 -0
- config/eval_config.yaml +68 -0
- config/eval_for_dpo_base_config.yaml +66 -0
- config/train_config.yaml +40 -0
- config/train_dpo_config.yaml +43 -0
- docs/EVAL.md +22 -0
- docs/MODELS.md +50 -0
- docs/TRAINING.md +114 -0
- docs/index.html +149 -0
- docs/style.css +78 -0
- docs/style_videos.css +52 -0
- docs/video_gen.html +254 -0
- docs/video_main.html +98 -0
- docs/video_vgg.html +452 -0
- filter_dataset/av_align.py +213 -0
- filter_dataset/av_align_hpc.py +213 -0
- filter_dataset/extract_audio.py +28 -0
- filter_dataset/filter_vggsound.py +43 -0
- filter_dataset/get_testing_audio_gt.py +26 -0
- filter_dataset/get_train_audio_gt.py +26 -0
- filter_dataset/get_val_audio_gt.py +26 -0
- gradio_demo.py +339 -0
- mmaudio/runner.py +938 -0
- reward_models/av_align.py +243 -0
- reward_models/cavp.py +128 -0
- reward_models/clap.py +95 -0
- reward_models/clap_multi_gpu.py +214 -0
- reward_models/ib_at_sync.sh +8 -0
- reward_models/ib_sync.py +325 -0
- reward_models/ib_sync.sh +47 -0
- reward_models/multi_reward_ib_av_at_desync_models.py +93 -0
- reward_models/multi_reward_ib_desync_models.py +53 -0
- reward_models/multi_reward_models.py +53 -0
- reward_models/multi_reward_models_new.py +57 -0
- runer_scripts_cmd.sh +87 -0
- train_dpo.py +216 -0
batch_eval-Copy1.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
import hydra
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed as distributed
|
| 8 |
+
import torchaudio
|
| 9 |
+
from hydra.core.hydra_config import HydraConfig
|
| 10 |
+
from omegaconf import DictConfig
|
| 11 |
+
from tqdm import tqdm
|
| 12 |
+
|
| 13 |
+
from mmaudio.data.data_setup import setup_eval_dataset
|
| 14 |
+
from mmaudio.eval_utils import ModelConfig, all_model_cfg, generate, make_video, make_video_new, load_video # todo Feb 24
|
| 15 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 16 |
+
from mmaudio.model.networks_new import MMAudio, get_my_mmaudio
|
| 17 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 18 |
+
|
| 19 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 20 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 21 |
+
|
| 22 |
+
local_rank = int(os.environ['LOCAL_RANK'])
|
| 23 |
+
world_size = int(os.environ['WORLD_SIZE'])
|
| 24 |
+
log = logging.getLogger()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@torch.inference_mode()
|
| 28 |
+
@hydra.main(version_base='1.3.2', config_path='config', config_name='eval_config.yaml')
|
| 29 |
+
def main(cfg: DictConfig):
|
| 30 |
+
device = 'cuda'
|
| 31 |
+
torch.cuda.set_device(local_rank)
|
| 32 |
+
|
| 33 |
+
if cfg.model not in all_model_cfg:
|
| 34 |
+
raise ValueError(f'Unknown model variant: {cfg.model}')
|
| 35 |
+
model: ModelConfig = all_model_cfg[cfg.model]
|
| 36 |
+
#model.download_if_needed()
|
| 37 |
+
seq_cfg = model.seq_cfg
|
| 38 |
+
|
| 39 |
+
run_dir = Path(HydraConfig.get().run.dir)
|
| 40 |
+
if cfg.output_name is None:
|
| 41 |
+
output_dir = run_dir / cfg.dataset
|
| 42 |
+
else:
|
| 43 |
+
output_dir = run_dir / f'{cfg.dataset}-{cfg.output_name}'
|
| 44 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
| 45 |
+
|
| 46 |
+
# todo
|
| 47 |
+
output_dir_audio = output_dir / 'audios'
|
| 48 |
+
output_dir_video = output_dir / 'videos'
|
| 49 |
+
output_dir_audio.mkdir(parents=True, exist_ok=True)
|
| 50 |
+
output_dir_video.mkdir(parents=True, exist_ok=True)
|
| 51 |
+
|
| 52 |
+
# load a pretrained model
|
| 53 |
+
seq_cfg.duration = cfg.duration_s
|
| 54 |
+
net: MMAudio = get_my_mmaudio(cfg.model).to(device).eval()
|
| 55 |
+
net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True))
|
| 56 |
+
log.info(f'Loaded weights from {model.model_path}')
|
| 57 |
+
net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)
|
| 58 |
+
log.info(f'Latent seq len: {seq_cfg.latent_seq_len}')
|
| 59 |
+
log.info(f'Clip seq len: {seq_cfg.clip_seq_len}')
|
| 60 |
+
log.info(f'Sync seq len: {seq_cfg.sync_seq_len}')
|
| 61 |
+
|
| 62 |
+
# misc setup
|
| 63 |
+
rng = torch.Generator(device=device)
|
| 64 |
+
rng.manual_seed(cfg.seed)
|
| 65 |
+
fm = FlowMatching(cfg.sampling.min_sigma,
|
| 66 |
+
inference_mode=cfg.sampling.method,
|
| 67 |
+
num_steps=cfg.sampling.num_steps)
|
| 68 |
+
|
| 69 |
+
feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
|
| 70 |
+
synchformer_ckpt=model.synchformer_ckpt,
|
| 71 |
+
enable_conditions=True,
|
| 72 |
+
mode=model.mode,
|
| 73 |
+
bigvgan_vocoder_ckpt=model.bigvgan_16k_path,
|
| 74 |
+
need_vae_encoder=False)
|
| 75 |
+
feature_utils = feature_utils.to(device).eval()
|
| 76 |
+
|
| 77 |
+
if cfg.compile:
|
| 78 |
+
net.preprocess_conditions = torch.compile(net.preprocess_conditions)
|
| 79 |
+
net.predict_flow = torch.compile(net.predict_flow)
|
| 80 |
+
feature_utils.compile()
|
| 81 |
+
|
| 82 |
+
dataset, loader = setup_eval_dataset(cfg.dataset, cfg)
|
| 83 |
+
|
| 84 |
+
with torch.amp.autocast(enabled=cfg.amp, dtype=torch.bfloat16, device_type=device):
|
| 85 |
+
for batch in tqdm(loader):
|
| 86 |
+
audios = generate(batch.get('clip_video', None),
|
| 87 |
+
batch.get('sync_video', None),
|
| 88 |
+
batch.get('caption', None),
|
| 89 |
+
feature_utils=feature_utils,
|
| 90 |
+
net=net,
|
| 91 |
+
fm=fm,
|
| 92 |
+
rng=rng,
|
| 93 |
+
cfg_strength=cfg.cfg_strength,
|
| 94 |
+
clip_batch_size_multiplier=64,
|
| 95 |
+
sync_batch_size_multiplier=64)
|
| 96 |
+
audios = audios.float().cpu()
|
| 97 |
+
names = batch['name']
|
| 98 |
+
'''
|
| 99 |
+
for audio, name in zip(audios, names):
|
| 100 |
+
torchaudio.save(output_dir / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 101 |
+
'''
|
| 102 |
+
|
| 103 |
+
'''
|
| 104 |
+
video_infos = batch.get('video_info', None)
|
| 105 |
+
assert video_infos is not None
|
| 106 |
+
for audio, name, video_info in zip(audios, names, video_infos):
|
| 107 |
+
torchaudio.save(output_dir_audio / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 108 |
+
video_save_path = output_dir_video / f'{name}.mp4'
|
| 109 |
+
make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
|
| 110 |
+
'''
|
| 111 |
+
|
| 112 |
+
video_paths = batch['video_path']
|
| 113 |
+
|
| 114 |
+
for audio, name, video_path in zip(audios, names, video_paths):
|
| 115 |
+
torchaudio.save(output_dir_audio / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 116 |
+
video_info = load_video(video_path, cfg.duration_s)
|
| 117 |
+
video_save_path = output_dir_video / f'{name}.mp4'
|
| 118 |
+
make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def distributed_setup():
|
| 122 |
+
distributed.init_process_group(backend="nccl")
|
| 123 |
+
local_rank = distributed.get_rank()
|
| 124 |
+
world_size = distributed.get_world_size()
|
| 125 |
+
log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}')
|
| 126 |
+
return local_rank, world_size
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
if __name__ == '__main__':
|
| 130 |
+
distributed_setup()
|
| 131 |
+
|
| 132 |
+
main()
|
| 133 |
+
|
| 134 |
+
# clean-up
|
| 135 |
+
distributed.destroy_process_group()
|
batch_eval.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
import hydra
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed as distributed
|
| 8 |
+
import torchaudio
|
| 9 |
+
from hydra.core.hydra_config import HydraConfig
|
| 10 |
+
from omegaconf import DictConfig
|
| 11 |
+
from tqdm import tqdm
|
| 12 |
+
|
| 13 |
+
from mmaudio.data.data_setup import setup_eval_dataset
|
| 14 |
+
from mmaudio.eval_utils import ModelConfig, all_model_cfg, generate, make_video, make_video_new, load_video # todo Feb 24
|
| 15 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 16 |
+
from mmaudio.model.networks_new import MMAudio, get_my_mmaudio
|
| 17 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 18 |
+
|
| 19 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 20 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 21 |
+
|
| 22 |
+
local_rank = int(os.environ['LOCAL_RANK'])
|
| 23 |
+
world_size = int(os.environ['WORLD_SIZE'])
|
| 24 |
+
log = logging.getLogger()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@torch.inference_mode()
|
| 28 |
+
@hydra.main(version_base='1.3.2', config_path='config', config_name='eval_config.yaml')
|
| 29 |
+
def main(cfg: DictConfig):
|
| 30 |
+
device = 'cuda'
|
| 31 |
+
torch.cuda.set_device(local_rank)
|
| 32 |
+
|
| 33 |
+
if cfg.model not in all_model_cfg:
|
| 34 |
+
raise ValueError(f'Unknown model variant: {cfg.model}')
|
| 35 |
+
model: ModelConfig = all_model_cfg[cfg.model]
|
| 36 |
+
#model.download_if_needed()
|
| 37 |
+
seq_cfg = model.seq_cfg
|
| 38 |
+
|
| 39 |
+
run_dir = Path(HydraConfig.get().run.dir)
|
| 40 |
+
if cfg.output_name is None:
|
| 41 |
+
output_dir = run_dir / cfg.dataset
|
| 42 |
+
else:
|
| 43 |
+
output_dir = run_dir / f'{cfg.dataset}-{cfg.output_name}'
|
| 44 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
| 45 |
+
|
| 46 |
+
# todo
|
| 47 |
+
# output_dir_audio = output_dir / 'audios'
|
| 48 |
+
# output_dir_video = output_dir / 'videos'
|
| 49 |
+
# output_dir_audio.mkdir(parents=True, exist_ok=True)
|
| 50 |
+
# output_dir_video.mkdir(parents=True, exist_ok=True)
|
| 51 |
+
|
| 52 |
+
# load a pretrained model
|
| 53 |
+
seq_cfg.duration = cfg.duration_s
|
| 54 |
+
net: MMAudio = get_my_mmaudio(cfg.model).to(device).eval()
|
| 55 |
+
|
| 56 |
+
# todo May 10
|
| 57 |
+
if model.model_path is None:
|
| 58 |
+
if model.model_name == 'small_44k':
|
| 59 |
+
model.model_path = Path(cfg.small_44k_pretrained_ckpt_path)
|
| 60 |
+
else:
|
| 61 |
+
raise ValueError('Given Model Is Not Supported !')
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True)) # todo Mar 6
|
| 65 |
+
log.info(f'Loaded weights from {model.model_path}')
|
| 66 |
+
net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)
|
| 67 |
+
log.info(f'Latent seq len: {seq_cfg.latent_seq_len}')
|
| 68 |
+
log.info(f'Clip seq len: {seq_cfg.clip_seq_len}')
|
| 69 |
+
log.info(f'Sync seq len: {seq_cfg.sync_seq_len}')
|
| 70 |
+
|
| 71 |
+
# misc setup
|
| 72 |
+
rng = torch.Generator(device=device)
|
| 73 |
+
rng.manual_seed(cfg.seed)
|
| 74 |
+
fm = FlowMatching(cfg.sampling.min_sigma,
|
| 75 |
+
inference_mode=cfg.sampling.method,
|
| 76 |
+
num_steps=cfg.sampling.num_steps)
|
| 77 |
+
|
| 78 |
+
feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
|
| 79 |
+
synchformer_ckpt=model.synchformer_ckpt,
|
| 80 |
+
enable_conditions=True,
|
| 81 |
+
mode=model.mode,
|
| 82 |
+
bigvgan_vocoder_ckpt=model.bigvgan_16k_path,
|
| 83 |
+
need_vae_encoder=False)
|
| 84 |
+
feature_utils = feature_utils.to(device).eval()
|
| 85 |
+
|
| 86 |
+
if cfg.compile:
|
| 87 |
+
net.preprocess_conditions = torch.compile(net.preprocess_conditions)
|
| 88 |
+
net.predict_flow = torch.compile(net.predict_flow)
|
| 89 |
+
feature_utils.compile()
|
| 90 |
+
|
| 91 |
+
dataset, loader = setup_eval_dataset(cfg.dataset, cfg)
|
| 92 |
+
|
| 93 |
+
with torch.amp.autocast(enabled=cfg.amp, dtype=torch.bfloat16, device_type=device):
|
| 94 |
+
for batch in tqdm(loader):
|
| 95 |
+
audios = generate(batch.get('clip_video', None),
|
| 96 |
+
batch.get('sync_video', None),
|
| 97 |
+
batch.get('caption', None),
|
| 98 |
+
feature_utils=feature_utils,
|
| 99 |
+
net=net,
|
| 100 |
+
fm=fm,
|
| 101 |
+
rng=rng,
|
| 102 |
+
cfg_strength=cfg.cfg_strength,
|
| 103 |
+
clip_batch_size_multiplier=64,
|
| 104 |
+
sync_batch_size_multiplier=64)
|
| 105 |
+
audios = audios.float().cpu()
|
| 106 |
+
names = batch['name']
|
| 107 |
+
|
| 108 |
+
for audio, name in zip(audios, names):
|
| 109 |
+
torchaudio.save(output_dir / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
'''
|
| 113 |
+
video_infos = batch.get('video_info', None)
|
| 114 |
+
assert video_infos is not None
|
| 115 |
+
for audio, name, video_info in zip(audios, names, video_infos):
|
| 116 |
+
torchaudio.save(output_dir_audio / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 117 |
+
video_save_path = output_dir_video / f'{name}.mp4'
|
| 118 |
+
make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
|
| 119 |
+
'''
|
| 120 |
+
'''
|
| 121 |
+
video_paths = batch['video_path']
|
| 122 |
+
|
| 123 |
+
for audio, name, video_path in zip(audios, names, video_paths):
|
| 124 |
+
torchaudio.save(output_dir_audio / f'{name}.flac', audio, seq_cfg.sampling_rate)
|
| 125 |
+
video_info = load_video(video_path, cfg.duration_s)
|
| 126 |
+
video_save_path = output_dir_video / f'{name}.mp4'
|
| 127 |
+
make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
|
| 128 |
+
'''
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def distributed_setup():
|
| 132 |
+
distributed.init_process_group(backend="nccl")
|
| 133 |
+
local_rank = distributed.get_rank()
|
| 134 |
+
world_size = distributed.get_world_size()
|
| 135 |
+
log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}')
|
| 136 |
+
return local_rank, world_size
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
if __name__ == '__main__':
|
| 140 |
+
distributed_setup()
|
| 141 |
+
|
| 142 |
+
main()
|
| 143 |
+
|
| 144 |
+
# clean-up
|
| 145 |
+
distributed.destroy_process_group()
|
config/__init__.py
ADDED
|
File without changes
|
config/base_config.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- data: base
|
| 3 |
+
- eval_data: base
|
| 4 |
+
- override hydra/job_logging: custom-simplest
|
| 5 |
+
- _self_
|
| 6 |
+
|
| 7 |
+
hydra:
|
| 8 |
+
run:
|
| 9 |
+
dir: ./output/${exp_id}
|
| 10 |
+
output_subdir: ${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 11 |
+
|
| 12 |
+
enable_email: False
|
| 13 |
+
|
| 14 |
+
model: small_44k
|
| 15 |
+
|
| 16 |
+
exp_id: default
|
| 17 |
+
debug: False
|
| 18 |
+
cudnn_benchmark: True
|
| 19 |
+
compile: True # todo True or False
|
| 20 |
+
amp: True
|
| 21 |
+
weights: null
|
| 22 |
+
checkpoint: null
|
| 23 |
+
seed: 14159265
|
| 24 |
+
num_workers: 10 # per-GPU
|
| 25 |
+
pin_memory: True # set to True if your system can handle it, i.e., have enough memory
|
| 26 |
+
|
| 27 |
+
# NOTE: This DOSE NOT affect the model during inference in any way
|
| 28 |
+
# they are just for the dataloader to fill in the missing data in multi-modal loading
|
| 29 |
+
# to change the sequence length for the model, see networks.py
|
| 30 |
+
data_dim:
|
| 31 |
+
text_seq_len: 77
|
| 32 |
+
clip_dim: 1024
|
| 33 |
+
sync_dim: 768
|
| 34 |
+
text_dim: 1024
|
| 35 |
+
|
| 36 |
+
# ema configuration
|
| 37 |
+
ema:
|
| 38 |
+
enable: True
|
| 39 |
+
sigma_rels: [0.05, 0.1]
|
| 40 |
+
update_every: 1
|
| 41 |
+
checkpoint_every: 5_000
|
| 42 |
+
checkpoint_folder: ./output/${exp_id}/ema_ckpts
|
| 43 |
+
default_output_sigma: 0.05
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# sampling
|
| 47 |
+
sampling:
|
| 48 |
+
mean: 0.0
|
| 49 |
+
scale: 1.0
|
| 50 |
+
min_sigma: 0.0
|
| 51 |
+
method: euler
|
| 52 |
+
num_steps: 25
|
| 53 |
+
|
| 54 |
+
# classifier-free guidance
|
| 55 |
+
null_condition_probability: 0.1 #0.1 todo Feb 10
|
| 56 |
+
cfg_strength: 4.5 #4.5
|
| 57 |
+
|
| 58 |
+
text_condition_drop_enable: False # todo Jan 16 False or True
|
| 59 |
+
text_drop_step: 100_000 # todo Jan 16
|
| 60 |
+
text_condition_drop_probability: [0.1, 0.3, 0.5] #[0.1, 0.3, 0.5] # todo Jan 16
|
| 61 |
+
|
| 62 |
+
# checkpoint paths to external modules
|
| 63 |
+
vae_16k_ckpt: ./ext_weights/v1-16.pth
|
| 64 |
+
vae_44k_ckpt: ./ext_weights/v1-44.pth
|
| 65 |
+
bigvgan_vocoder_ckpt: ./ext_weights/best_netG.pt
|
| 66 |
+
synchformer_ckpt: ./ext_weights/synchformer_state_dict.pth
|
config/dpo_base_config.yaml
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- dpo_data: base
|
| 3 |
+
- eval_data: base
|
| 4 |
+
- override hydra/job_logging: custom-simplest
|
| 5 |
+
- _self_
|
| 6 |
+
|
| 7 |
+
hydra:
|
| 8 |
+
run:
|
| 9 |
+
dir: ./output/${exp_id}
|
| 10 |
+
output_subdir: ${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 11 |
+
|
| 12 |
+
enable_email: False
|
| 13 |
+
|
| 14 |
+
model: small_44k
|
| 15 |
+
|
| 16 |
+
exp_id: default
|
| 17 |
+
debug: False
|
| 18 |
+
cudnn_benchmark: True
|
| 19 |
+
compile: True # todo True or False
|
| 20 |
+
amp: True
|
| 21 |
+
weights: null
|
| 22 |
+
checkpoint: null
|
| 23 |
+
pretrained_ckpt_for_dpo: ./output/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_ema_final.pth
|
| 24 |
+
|
| 25 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_ema_final.pth
|
| 26 |
+
|
| 27 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth'
|
| 28 |
+
|
| 29 |
+
# todo for iter1 checkpoint: beta20000_full_reward_ib_desync
|
| 30 |
+
#'./output/vgg_only_small_44k_lumina_v2a_two_stream_May7_depth16_caption_beta20000_full_reward_ib_desync/vgg_only_small_44k_lumina_v2a_two_stream_May7_depth16_caption_beta20000_full_reward_ib_desync_ema_final.pth'
|
| 31 |
+
|
| 32 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth'
|
| 33 |
+
|
| 34 |
+
#'./output/vgg_only_small_44k_lumina_v2a_two_stream_May6_depth16_caption_beta2000_iter1_desync/vgg_only_small_44k_lumina_v2a_two_stream_May6_depth16_caption_beta2000_iter1_desync_ema_final.pth'
|
| 35 |
+
|
| 36 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth'
|
| 37 |
+
|
| 38 |
+
#'./output/vgg_only_small_44k_lumina_v2a_two_stream_May6_depth16_caption_beta2000_iter1_desync/vgg_only_small_44k_lumina_v2a_two_stream_May6_depth16_caption_beta2000_iter1_desync_ema_final.pth'
|
| 39 |
+
|
| 40 |
+
#---> always used ckpt of baseline model:
|
| 41 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth'
|
| 42 |
+
|
| 43 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_last.pth'
|
| 44 |
+
#'./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth' # todo add Mar 2
|
| 45 |
+
seed: 14159265
|
| 46 |
+
num_workers: 10 # per-GPU
|
| 47 |
+
pin_memory: True # set to True if your system can handle it, i.e., have enough memory
|
| 48 |
+
|
| 49 |
+
# NOTE: This DOSE NOT affect the model during inference in any way
|
| 50 |
+
# they are just for the dataloader to fill in the missing data in multi-modal loading
|
| 51 |
+
# to change the sequence length for the model, see networks.py
|
| 52 |
+
data_dim:
|
| 53 |
+
text_seq_len: 77
|
| 54 |
+
clip_dim: 1024
|
| 55 |
+
sync_dim: 768
|
| 56 |
+
text_dim: 1024
|
| 57 |
+
|
| 58 |
+
# ema configuration
|
| 59 |
+
ema:
|
| 60 |
+
enable: True
|
| 61 |
+
sigma_rels: [0.05, 0.1]
|
| 62 |
+
update_every: 1
|
| 63 |
+
checkpoint_every: 500 # todo
|
| 64 |
+
checkpoint_folder: ./output/${exp_id}/ema_ckpts
|
| 65 |
+
default_output_sigma: 0.05
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# sampling
|
| 69 |
+
sampling:
|
| 70 |
+
mean: 0.0
|
| 71 |
+
scale: 1.0
|
| 72 |
+
min_sigma: 0.0
|
| 73 |
+
method: euler
|
| 74 |
+
num_steps: 25
|
| 75 |
+
|
| 76 |
+
# classifier-free guidance
|
| 77 |
+
null_condition_probability: 0.1 #0.1 todo Feb 10
|
| 78 |
+
cfg_strength: 4.5 # May base: 4.5 # todo
|
| 79 |
+
|
| 80 |
+
text_condition_drop_enable: False # todo Jan 16 False or True
|
| 81 |
+
text_drop_step: 100_000 # todo Jan 16
|
| 82 |
+
text_condition_drop_probability: [0.1, 0.3, 0.5] #[0.1, 0.3, 0.5] # todo Jan 16
|
| 83 |
+
|
| 84 |
+
# checkpoint paths to external modules
|
| 85 |
+
vae_16k_ckpt: ./ext_weights/v1-16.pth
|
| 86 |
+
vae_44k_ckpt: ./ext_weights/v1-44.pth
|
| 87 |
+
bigvgan_vocoder_ckpt: ./ext_weights/best_netG.pt
|
| 88 |
+
synchformer_ckpt: ./ext_weights/synchformer_state_dict.pth
|
config/eval_config.yaml
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- base_config
|
| 3 |
+
- override hydra/job_logging: custom-simplest
|
| 4 |
+
- _self_
|
| 5 |
+
|
| 6 |
+
hydra:
|
| 7 |
+
run:
|
| 8 |
+
dir: ./output/${exp_id}
|
| 9 |
+
output_subdir: eval-${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 10 |
+
|
| 11 |
+
exp_id: ${model}
|
| 12 |
+
dataset: moviegen #vggsound # todo Jan 13
|
| 13 |
+
duration_s: 8.0 # todo can be changed?
|
| 14 |
+
small_44k_pretrained_ckpt_path: ./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth
|
| 15 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_ema_final.pth
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter3_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter3_steps5k_ema_final.pth
|
| 19 |
+
|
| 20 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_ema_final.pth
|
| 21 |
+
|
| 22 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_ema_final.pth
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k/vgg_only_small_44k_lumina_v2a_two_stream_May12_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_ema_final.pth
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta30000_full_reward_ib_desync/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta30000_full_reward_ib_desync_ema_final.pth
|
| 29 |
+
|
| 30 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_iter1_desync_to_ib_av/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_iter1_desync_to_ib_av_ema_final.pth
|
| 31 |
+
|
| 32 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta20000_iter1_ib_av_at_2desync/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta20000_iter1_ib_av_at_2desync_ema_final.pth
|
| 33 |
+
|
| 34 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta20000_iter1_ib_2av_at_2desync/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta20000_iter1_ib_2av_at_2desync_ema_final.pth
|
| 35 |
+
|
| 36 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_desync_to_ib_av/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_desync_to_ib_av_ema_final.pth
|
| 37 |
+
|
| 38 |
+
#./output/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_iter1_full_reward_ib_2av_at_2desync_10audio_per_video/vgg_only_small_44k_lumina_v2a_two_stream_May10_depth16_caption_beta10000_iter1_full_reward_ib_2av_at_2desync_10audio_per_video_ema_final.pth
|
| 39 |
+
|
| 40 |
+
#./output/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16/vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb18_caption_depth16_ema_final.pth
|
| 41 |
+
|
| 42 |
+
# for inference, this is the per-GPU batch size
|
| 43 |
+
batch_size: 8 #16
|
| 44 |
+
output_name: lumina_v2a_moviegen_Sep25_base_model_inference_ema
|
| 45 |
+
|
| 46 |
+
#dpo_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter3_steps5k_inference_ema
|
| 47 |
+
|
| 48 |
+
#dpo_May12_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema
|
| 49 |
+
|
| 50 |
+
#dpo_iter1_May12_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema
|
| 51 |
+
|
| 52 |
+
#dpo_iter1_May11_lumina_v2a_two_stream_depth16_caption_beta30000_full_reward_ib_desync_inference_ema
|
| 53 |
+
|
| 54 |
+
#dpo_iter1_May11_lumina_v2a_two_stream_depth16_caption_beta10000_desync_to_ib_av_inference_ema
|
| 55 |
+
|
| 56 |
+
#dpo_iter1_May10_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_av_at_2desync_inference_ema
|
| 57 |
+
|
| 58 |
+
#dpo_iter1_May10_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_2av_at_2desync_10audio_per_video_inference_ema
|
| 59 |
+
|
| 60 |
+
#dpo_iter1_May10_lumina_v2a_two_stream_depth16_caption_beta10000_desync_to_ib_av_inference_ema
|
| 61 |
+
|
| 62 |
+
#dpo_iter1_May10_lumina_v2a_two_stream_depth16_caption_beta10000_full_reward_ib_2av_at_2desync_10audio_per_video_inference_ema
|
| 63 |
+
|
| 64 |
+
# todo for pretrained baseline model
|
| 65 |
+
#lumina_v2a_two_stream_depth16_caption_May10_check_inference_ema
|
| 66 |
+
|
| 67 |
+
# todo for DPO FT model
|
| 68 |
+
#dpo_iter1_May10_lumina_v2a_two_stream_depth16_caption_beta10000_full_reward_ib_av_at_2desync_10audio_per_video_inference_ema # todo Jan 14
|
config/eval_for_dpo_base_config.yaml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- data: base
|
| 3 |
+
- eval_data_for_gen_dpo: base
|
| 4 |
+
- override hydra/job_logging: custom-simplest
|
| 5 |
+
- _self_
|
| 6 |
+
|
| 7 |
+
hydra:
|
| 8 |
+
run:
|
| 9 |
+
dir: ./output/${exp_id}
|
| 10 |
+
output_subdir: ${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 11 |
+
|
| 12 |
+
enable_email: False
|
| 13 |
+
|
| 14 |
+
model: small_44k
|
| 15 |
+
|
| 16 |
+
exp_id: default
|
| 17 |
+
debug: False
|
| 18 |
+
cudnn_benchmark: True
|
| 19 |
+
compile: True # todo True or False
|
| 20 |
+
amp: True
|
| 21 |
+
weights: null
|
| 22 |
+
checkpoint: null
|
| 23 |
+
seed: 14159265
|
| 24 |
+
num_workers: 10 # per-GPU
|
| 25 |
+
pin_memory: True # set to True if your system can handle it, i.e., have enough memory
|
| 26 |
+
|
| 27 |
+
# NOTE: This DOSE NOT affect the model during inference in any way
|
| 28 |
+
# they are just for the dataloader to fill in the missing data in multi-modal loading
|
| 29 |
+
# to change the sequence length for the model, see networks.py
|
| 30 |
+
data_dim:
|
| 31 |
+
text_seq_len: 77
|
| 32 |
+
clip_dim: 1024
|
| 33 |
+
sync_dim: 768
|
| 34 |
+
text_dim: 1024
|
| 35 |
+
|
| 36 |
+
# ema configuration
|
| 37 |
+
ema:
|
| 38 |
+
enable: True
|
| 39 |
+
sigma_rels: [0.05, 0.1]
|
| 40 |
+
update_every: 1
|
| 41 |
+
checkpoint_every: 5_000
|
| 42 |
+
checkpoint_folder: ./output/${exp_id}/ema_ckpts
|
| 43 |
+
default_output_sigma: 0.05
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# sampling
|
| 47 |
+
sampling:
|
| 48 |
+
mean: 0.0
|
| 49 |
+
scale: 1.0
|
| 50 |
+
min_sigma: 0.0
|
| 51 |
+
method: euler
|
| 52 |
+
num_steps: 25
|
| 53 |
+
|
| 54 |
+
# classifier-free guidance
|
| 55 |
+
null_condition_probability: 0.1 #0.1 todo Feb 10
|
| 56 |
+
cfg_strength: 4.5
|
| 57 |
+
|
| 58 |
+
text_condition_drop_enable: False # todo Jan 16 False or True
|
| 59 |
+
text_drop_step: 100_000 # todo Jan 16
|
| 60 |
+
text_condition_drop_probability: [0.1, 0.3, 0.5] #[0.1, 0.3, 0.5] # todo Jan 16
|
| 61 |
+
|
| 62 |
+
# checkpoint paths to external modules
|
| 63 |
+
vae_16k_ckpt: ./ext_weights/v1-16.pth
|
| 64 |
+
vae_44k_ckpt: ./ext_weights/v1-44.pth
|
| 65 |
+
bigvgan_vocoder_ckpt: ./ext_weights/best_netG.pt
|
| 66 |
+
synchformer_ckpt: ./ext_weights/synchformer_state_dict.pth
|
config/train_config.yaml
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- base_config
|
| 3 |
+
- override data: base
|
| 4 |
+
- override hydra/job_logging: custom
|
| 5 |
+
- _self_
|
| 6 |
+
|
| 7 |
+
hydra:
|
| 8 |
+
run:
|
| 9 |
+
dir: ./output/${exp_id}
|
| 10 |
+
output_subdir: train-${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 11 |
+
|
| 12 |
+
ema:
|
| 13 |
+
start: 0
|
| 14 |
+
|
| 15 |
+
mini_train: False
|
| 16 |
+
example_train: False
|
| 17 |
+
vgg_only_train: True # todo add in Jan 11
|
| 18 |
+
|
| 19 |
+
log_text_interval: 200
|
| 20 |
+
log_extra_interval: 20_000
|
| 21 |
+
val_interval: 5_000
|
| 22 |
+
eval_interval: 1_000 # todo without using Jan 12
|
| 23 |
+
save_eval_interval: 2_000 # todo without using Jan 12
|
| 24 |
+
save_weights_interval: 10_000
|
| 25 |
+
save_checkpoint_interval: 10_000
|
| 26 |
+
save_copy_iterations: []
|
| 27 |
+
|
| 28 |
+
batch_size: 512 # todo
|
| 29 |
+
eval_batch_size: 256 # per-GPU todo
|
| 30 |
+
|
| 31 |
+
num_iterations: 300_000 # todo original: 500_000
|
| 32 |
+
learning_rate: 1.0e-4
|
| 33 |
+
linear_warmup_steps: 1_000
|
| 34 |
+
|
| 35 |
+
lr_schedule: step
|
| 36 |
+
lr_schedule_steps: [240_000, 270_000] # todo original: [440_000, 470_000]
|
| 37 |
+
lr_schedule_gamma: 0.1
|
| 38 |
+
|
| 39 |
+
clip_grad_norm: 1.0
|
| 40 |
+
weight_decay: 1.0e-6
|
config/train_dpo_config.yaml
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- dpo_base_config
|
| 3 |
+
- override dpo_data: base
|
| 4 |
+
- override hydra/job_logging: custom
|
| 5 |
+
- _self_
|
| 6 |
+
|
| 7 |
+
hydra:
|
| 8 |
+
run:
|
| 9 |
+
dir: ./output/${exp_id}
|
| 10 |
+
output_subdir: train-${now:%Y-%m-%d_%H-%M-%S}-hydra
|
| 11 |
+
|
| 12 |
+
ema:
|
| 13 |
+
start: 0
|
| 14 |
+
|
| 15 |
+
mini_train: False
|
| 16 |
+
example_train: False
|
| 17 |
+
vgg_only_train: False # todo add in Jan 11
|
| 18 |
+
dpo_train: True # todo add in Mar 2
|
| 19 |
+
beta_dpo: 20000 # todo add in Mar 12: 1000, 2000, 5000, 10000
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
log_text_interval: 20 # todo
|
| 23 |
+
log_extra_interval: 500 # May base: 2_000 # todo
|
| 24 |
+
val_interval: 500 # todo
|
| 25 |
+
eval_interval: 100 # todo without using Jan 12
|
| 26 |
+
save_eval_interval: 200 # todo without using Jan 12
|
| 27 |
+
save_weights_interval: 1_000 # todo
|
| 28 |
+
save_checkpoint_interval: 1_000 # todo
|
| 29 |
+
save_copy_iterations: []
|
| 30 |
+
|
| 31 |
+
batch_size: 256 # todo : 512
|
| 32 |
+
eval_batch_size: 128 # per-GPU todo: 256
|
| 33 |
+
|
| 34 |
+
num_iterations: 5_000 # May base: 10_000 # todo Mar 12: 30_000 # todo Mar 6: 10_000
|
| 35 |
+
learning_rate: 2.0e-6
|
| 36 |
+
linear_warmup_steps: 100 #50 # May base: 100 # todo
|
| 37 |
+
|
| 38 |
+
lr_schedule: step
|
| 39 |
+
lr_schedule_steps: [240_000, 270_000] # todo Mar 12
|
| 40 |
+
lr_schedule_gamma: 0.1
|
| 41 |
+
|
| 42 |
+
clip_grad_norm: 1.0
|
| 43 |
+
weight_decay: 1.0e-6
|
docs/EVAL.md
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Evaluation
|
| 2 |
+
|
| 3 |
+
## Batch Evaluation
|
| 4 |
+
|
| 5 |
+
To evaluate the model on a dataset, use the `batch_eval.py` script. It is significantly more efficient in large-scale evaluation compared to `demo.py`, supporting batched inference, multi-GPU inference, torch compilation, and skipping video compositions.
|
| 6 |
+
|
| 7 |
+
An example of running this script with four GPUs is as follows:
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
OMP_NUM_THREADS=4 torchrun --standalone --nproc_per_node=4 batch_eval.py duration_s=8 dataset=vggsound model=small_16k num_workers=8
|
| 11 |
+
```
|
| 12 |
+
|
| 13 |
+
You may need to update the data paths in `config/eval_data/base.yaml`.
|
| 14 |
+
More configuration options can be found in `config/base_config.yaml` and `config/eval_config.yaml`.
|
| 15 |
+
|
| 16 |
+
## Precomputed Results
|
| 17 |
+
|
| 18 |
+
Precomputed results for VGGSound, AudioCaps, and MovieGen are available here: https://huggingface.co/datasets/hkchengrex/MMAudio-precomputed-results
|
| 19 |
+
|
| 20 |
+
## Obtaining Quantitative Metrics
|
| 21 |
+
|
| 22 |
+
Our evaluation code is available here: https://github.com/hkchengrex/av-benchmark
|
docs/MODELS.md
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Pretrained models
|
| 2 |
+
|
| 3 |
+
The models will be downloaded automatically when you run the demo script. MD5 checksums are provided in `mmaudio/utils/download_utils.py`.
|
| 4 |
+
The models are also available at https://huggingface.co/hkchengrex/MMAudio/tree/main
|
| 5 |
+
|
| 6 |
+
| Model | Download link | File size |
|
| 7 |
+
| -------- | ------- | ------- |
|
| 8 |
+
| Flow prediction network, small 16kHz | <a href="https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_small_16k.pth" download="mmaudio_small_16k.pth">mmaudio_small_16k.pth</a> | 601M |
|
| 9 |
+
| Flow prediction network, small 44.1kHz | <a href="https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_small_44k.pth" download="mmaudio_small_44k.pth">mmaudio_small_44k.pth</a> | 601M |
|
| 10 |
+
| Flow prediction network, medium 44.1kHz | <a href="https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_medium_44k.pth" download="mmaudio_medium_44k.pth">mmaudio_medium_44k.pth</a> | 2.4G |
|
| 11 |
+
| Flow prediction network, large 44.1kHz | <a href="https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_large_44k.pth" download="mmaudio_large_44k.pth">mmaudio_large_44k.pth</a> | 3.9G |
|
| 12 |
+
| Flow prediction network, large 44.1kHz, v2 **(recommended)** | <a href="https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_large_44k_v2.pth" download="mmaudio_large_44k_v2.pth">mmaudio_large_44k_v2.pth</a> | 3.9G |
|
| 13 |
+
| 16kHz VAE | <a href="https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-16.pth">v1-16.pth</a> | 655M |
|
| 14 |
+
| 16kHz BigVGAN vocoder (from Make-An-Audio 2) |<a href="https://github.com/hkchengrex/MMAudio/releases/download/v0.1/best_netG.pt">best_netG.pt</a> | 429M |
|
| 15 |
+
| 44.1kHz VAE |<a href="https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-44.pth">v1-44.pth</a> | 1.2G |
|
| 16 |
+
| Synchformer visual encoder |<a href="https://github.com/hkchengrex/MMAudio/releases/download/v0.1/synchformer_state_dict.pth">synchformer_state_dict.pth</a> | 907M |
|
| 17 |
+
|
| 18 |
+
To run the model, you need four components: a flow prediction network, visual feature extractors (Synchformer and CLIP, CLIP will be downloaded automatically), a VAE, and a vocoder. VAEs and vocoders are specific to the sampling rate (16kHz or 44.1kHz) and not model sizes.
|
| 19 |
+
The 44.1kHz vocoder will be downloaded automatically.
|
| 20 |
+
The `_v2` model performs worse in benchmarking (e.g., in Fréchet distance), but, in my experience, generalizes better to new data.
|
| 21 |
+
|
| 22 |
+
The expected directory structure (full):
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
MMAudio
|
| 26 |
+
├── ext_weights
|
| 27 |
+
│ ├── best_netG.pt
|
| 28 |
+
│ ├── synchformer_state_dict.pth
|
| 29 |
+
│ ├── v1-16.pth
|
| 30 |
+
│ └── v1-44.pth
|
| 31 |
+
├── weights
|
| 32 |
+
│ ├── mmaudio_small_16k.pth
|
| 33 |
+
│ ├── mmaudio_small_44k.pth
|
| 34 |
+
│ ├── mmaudio_medium_44k.pth
|
| 35 |
+
│ ├── mmaudio_large_44k.pth
|
| 36 |
+
│ └── mmaudio_large_44k_v2.pth
|
| 37 |
+
└── ...
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
The expected directory structure (minimal, for the recommended model only):
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
MMAudio
|
| 44 |
+
├── ext_weights
|
| 45 |
+
│ ├── synchformer_state_dict.pth
|
| 46 |
+
│ └── v1-44.pth
|
| 47 |
+
├── weights
|
| 48 |
+
│ └── mmaudio_large_44k_v2.pth
|
| 49 |
+
└── ...
|
| 50 |
+
```
|
docs/TRAINING.md
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Training
|
| 2 |
+
|
| 3 |
+
## Overview
|
| 4 |
+
|
| 5 |
+
We have put a large emphasis on making training as fast as possible.
|
| 6 |
+
Consequently, some pre-processing steps are required.
|
| 7 |
+
|
| 8 |
+
Namely, before starting any training, we
|
| 9 |
+
|
| 10 |
+
1. Encode training audios into spectrograms and then with VAE into mean/std
|
| 11 |
+
2. Extract CLIP and synchronization features from videos
|
| 12 |
+
3. Extract CLIP features from text (captions)
|
| 13 |
+
4. Encode all extracted features into [MemoryMappedTensors](https://pytorch.org/tensordict/main/reference/generated/tensordict.MemoryMappedTensor.html) with [TensorDict](https://pytorch.org/tensordict/main/reference/tensordict.html)
|
| 14 |
+
|
| 15 |
+
**NOTE:** for maximum training speed (e.g., when training the base model with 2*H100s), you would need around 3~5 GB/s of random read speed. Spinning disks would not be able to catch up and most consumer-grade SSDs would struggle. In my experience, the best bet is to have a large enough system memory such that the OS can cache the data. This way, the data is read from RAM instead of disk.
|
| 16 |
+
|
| 17 |
+
The current training script does not support `_v2` training.
|
| 18 |
+
|
| 19 |
+
## Prerequisites
|
| 20 |
+
|
| 21 |
+
Install [av-benchmark](https://github.com/hkchengrex/av-benchmark). We use this library to automatically evaluate on the validation set during training, and on the test set after training.
|
| 22 |
+
You will also need ffmpeg for video frames extraction. Note that `torchaudio` imposes a maximum version limit (`ffmpeg<7`). You can install it as follows:
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
conda install -c conda-forge 'ffmpeg<7'
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
Download the corresponding VAE (`v1-16.pth` for 16kHz training, and `v1-44.pth` for 44.1kHz training), vocoder models (`best_netG.pt` for 16kHz training; the vocoder for 44.1kHz training will be downloaded automatically), the [empty string encoding](https://github.com/hkchengrex/MMAudio/releases/download/v0.1/empty_string.pth), and Synchformer weights from [MODELS.md](https://github.com/hkchengrex/MMAudio/blob/main/docs/MODELS.md), and place them in `ext_weights/`.
|
| 29 |
+
|
| 30 |
+
## Preparing Audio-Video-Text Features
|
| 31 |
+
|
| 32 |
+
We have prepared some example data in `training/example_videos`.
|
| 33 |
+
Running the `training/extract_video_training_latents.py` script will extract the audio, video, and text features and save them as a `TensorDict` with a `.tsv` file containing metadata on disk.
|
| 34 |
+
|
| 35 |
+
To run this script, use the `torchrun` utility:
|
| 36 |
+
|
| 37 |
+
```bash
|
| 38 |
+
torchrun --standalone training/extract_video_training_latents.py
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
You can run this with multiple GPUs (with `--nproc_per_node=<n>`) to speed up extraction.
|
| 42 |
+
Check the top of the script to switch between 16kHz/44.1kHz extraction and data path definitions.
|
| 43 |
+
|
| 44 |
+
Arguments:
|
| 45 |
+
|
| 46 |
+
- `latent_dir` -- where intermediate latent outputs are saved. It is safe to delete this directory afterwards.
|
| 47 |
+
- `output_dir` -- where TensorDict and the metadata file are saved.
|
| 48 |
+
|
| 49 |
+
## Preparing Audio-Text Features
|
| 50 |
+
|
| 51 |
+
We have prepared some example data in `training/example_audios`.
|
| 52 |
+
We first need to run `training/partition_clips` to partition each audio file into clips.
|
| 53 |
+
Then, we run the `training/extract_audio_training_latents.py` script, which will extract the audio and text features and save them as a `TensorDict` with a `.tsv` file containing metadata on the disk.
|
| 54 |
+
|
| 55 |
+
To run this script:
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
python training/partition_clips.py
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
Arguments:
|
| 62 |
+
|
| 63 |
+
- `data_path` -- path to the audio files (`.flac` or `.wav`)
|
| 64 |
+
- `output_dir` -- path to the output `.csv` file
|
| 65 |
+
- `start` -- optional; useful when you need to run multiple processes to speed up processing -- this defines the beginning of the chunk to be processed
|
| 66 |
+
- `end` -- optional; useful when you need to run multiple processes to speed up processing -- this defines the end of the chunk to be processed
|
| 67 |
+
|
| 68 |
+
Then, run the `extract_audio_training_latents.py` with `torchrun`:
|
| 69 |
+
|
| 70 |
+
```bash
|
| 71 |
+
torchrun --standalone training/extract_audio_training_latents.py
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
You can run this with multiple GPUs (with `--nproc_per_node=<n>`) to speed up extraction.
|
| 75 |
+
Check the top of the script to switch between 16kHz/44.1kHz extraction.
|
| 76 |
+
|
| 77 |
+
Arguments:
|
| 78 |
+
|
| 79 |
+
- `data_dir` -- path to the audio files (`.flac` or `.wav`), same as the previous step
|
| 80 |
+
- `captions_tsv` -- path to the captions file, a csv file at least with columns `id` and `caption`
|
| 81 |
+
- `clips_tsv` -- path to the clips file, generated in the last step
|
| 82 |
+
- `latent_dir` -- where intermediate latent outputs are saved. It is safe to delete this directory afterwards.
|
| 83 |
+
- `output_dir` -- where TensorDict and the metadata file are saved.
|
| 84 |
+
|
| 85 |
+
## Training
|
| 86 |
+
|
| 87 |
+
We use Distributed Data Parallel (DDP) for training.
|
| 88 |
+
First, specify the data path in `config/data/base.yaml`. If you used the default parameters in the scripts above to extract features for the example data, the `Example_video` and `Example_audio` items should already be correct.
|
| 89 |
+
|
| 90 |
+
To run training on the example data, use the following command:
|
| 91 |
+
|
| 92 |
+
```bash
|
| 93 |
+
OMP_NUM_THREADS=4 torchrun --standalone --nproc_per_node=1 train.py exp_id=debug compile=False debug=True example_train=True batch_size=1
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
This will not train a useful model, but it will check if everything is set up correctly.
|
| 97 |
+
|
| 98 |
+
For full training on the base model with two GPUs, use the following command:
|
| 99 |
+
|
| 100 |
+
```bash
|
| 101 |
+
OMP_NUM_THREADS=4 torchrun --standalone --nproc_per_node=2 train.py exp_id=exp_1 model=small_16k
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
Any outputs from training will be stored in `output/<exp_id>`.
|
| 105 |
+
|
| 106 |
+
More configuration options can be found in `config/base_config.yaml` and `config/train_config.yaml`.
|
| 107 |
+
|
| 108 |
+
## Checkpoints
|
| 109 |
+
|
| 110 |
+
Model checkpoints, including optimizer states and the latest EMA weights, are available here: https://huggingface.co/hkchengrex/MMAudio
|
| 111 |
+
|
| 112 |
+
---
|
| 113 |
+
|
| 114 |
+
Godspeed!
|
docs/index.html
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<!-- Google tag (gtag.js) -->
|
| 5 |
+
<script async src="https://www.googletagmanager.com/gtag/js?id=G-0JKBJ3WRJZ"></script>
|
| 6 |
+
<script>
|
| 7 |
+
window.dataLayer = window.dataLayer || [];
|
| 8 |
+
function gtag(){dataLayer.push(arguments);}
|
| 9 |
+
gtag('js', new Date());
|
| 10 |
+
gtag('config', 'G-0JKBJ3WRJZ');
|
| 11 |
+
</script>
|
| 12 |
+
|
| 13 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 14 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 15 |
+
<link href="https://fonts.googleapis.com/css2?family=Source+Sans+3&display=swap" rel="stylesheet">
|
| 16 |
+
<meta charset="UTF-8">
|
| 17 |
+
<title>MMAudio</title>
|
| 18 |
+
|
| 19 |
+
<link rel="icon" type="image/png" href="images/icon.png">
|
| 20 |
+
|
| 21 |
+
<meta name="viewport" content="width=device-width, initial-scale=1">
|
| 22 |
+
<!-- CSS only -->
|
| 23 |
+
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet"
|
| 24 |
+
integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
|
| 25 |
+
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
|
| 26 |
+
|
| 27 |
+
<link rel="stylesheet" href="style.css">
|
| 28 |
+
</head>
|
| 29 |
+
<body>
|
| 30 |
+
|
| 31 |
+
<body>
|
| 32 |
+
<br><br><br><br>
|
| 33 |
+
<div class="container">
|
| 34 |
+
<div class="row text-center" style="font-size:38px">
|
| 35 |
+
<div class="col strong">
|
| 36 |
+
Taming Multimodal Joint Training for High-Quality <br>Video-to-Audio Synthesis
|
| 37 |
+
</div>
|
| 38 |
+
</div>
|
| 39 |
+
|
| 40 |
+
<br>
|
| 41 |
+
<div class="row text-center" style="font-size:28px">
|
| 42 |
+
<div class="col">
|
| 43 |
+
arXiv 2024
|
| 44 |
+
</div>
|
| 45 |
+
</div>
|
| 46 |
+
<br>
|
| 47 |
+
|
| 48 |
+
<div class="h-100 row text-center heavy justify-content-md-center" style="font-size:22px;">
|
| 49 |
+
<div class="col-sm-auto px-lg-2">
|
| 50 |
+
<a href="https://hkchengrex.github.io/">Ho Kei Cheng<sup>1</sup></a>
|
| 51 |
+
</div>
|
| 52 |
+
<div class="col-sm-auto px-lg-2">
|
| 53 |
+
<nobr><a href="https://scholar.google.co.jp/citations?user=RRIO1CcAAAAJ">Masato Ishii<sup>2</sup></a></nobr>
|
| 54 |
+
</div>
|
| 55 |
+
<div class="col-sm-auto px-lg-2">
|
| 56 |
+
<nobr><a href="https://scholar.google.com/citations?user=sXAjHFIAAAAJ">Akio Hayakawa<sup>2</sup></a></nobr>
|
| 57 |
+
</div>
|
| 58 |
+
<div class="col-sm-auto px-lg-2">
|
| 59 |
+
<nobr><a href="https://scholar.google.com/citations?user=XCRO260AAAAJ">Takashi Shibuya<sup>2</sup></a></nobr>
|
| 60 |
+
</div>
|
| 61 |
+
<div class="col-sm-auto px-lg-2">
|
| 62 |
+
<nobr><a href="https://www.alexander-schwing.de/">Alexander Schwing<sup>1</sup></a></nobr>
|
| 63 |
+
</div>
|
| 64 |
+
<div class="col-sm-auto px-lg-2" >
|
| 65 |
+
<nobr><a href="https://www.yukimitsufuji.com/">Yuki Mitsufuji<sup>2,3</sup></a></nobr>
|
| 66 |
+
</div>
|
| 67 |
+
</div>
|
| 68 |
+
|
| 69 |
+
<div class="h-100 row text-center heavy justify-content-md-center" style="font-size:22px;">
|
| 70 |
+
<div class="col-sm-auto px-lg-2">
|
| 71 |
+
<sup>1</sup>University of Illinois Urbana-Champaign
|
| 72 |
+
</div>
|
| 73 |
+
<div class="col-sm-auto px-lg-2">
|
| 74 |
+
<sup>2</sup>Sony AI
|
| 75 |
+
</div>
|
| 76 |
+
<div class="col-sm-auto px-lg-2">
|
| 77 |
+
<sup>3</sup>Sony Group Corporation
|
| 78 |
+
</div>
|
| 79 |
+
</div>
|
| 80 |
+
|
| 81 |
+
<br>
|
| 82 |
+
|
| 83 |
+
<br>
|
| 84 |
+
|
| 85 |
+
<div class="h-100 row text-center justify-content-md-center" style="font-size:20px;">
|
| 86 |
+
<div class="col-sm-2">
|
| 87 |
+
<a href="https://arxiv.org/abs/2412.15322">[Paper]</a>
|
| 88 |
+
</div>
|
| 89 |
+
<div class="col-sm-2">
|
| 90 |
+
<a href="https://github.com/hkchengrex/MMAudio">[Code]</a>
|
| 91 |
+
</div>
|
| 92 |
+
<div class="col-sm-3">
|
| 93 |
+
<a href="https://huggingface.co/spaces/hkchengrex/MMAudio">[Huggingface Demo]</a>
|
| 94 |
+
</div>
|
| 95 |
+
<div class="col-sm-2">
|
| 96 |
+
<a href="https://colab.research.google.com/drive/1TAaXCY2-kPk4xE4PwKB3EqFbSnkUuzZ8?usp=sharing">[Colab Demo]</a>
|
| 97 |
+
</div>
|
| 98 |
+
<div class="col-sm-3">
|
| 99 |
+
<a href="https://replicate.com/zsxkib/mmaudio">[Replicate Demo]</a>
|
| 100 |
+
</div>
|
| 101 |
+
</div>
|
| 102 |
+
|
| 103 |
+
<br>
|
| 104 |
+
|
| 105 |
+
<hr>
|
| 106 |
+
|
| 107 |
+
<div class="row" style="font-size:32px">
|
| 108 |
+
<div class="col strong">
|
| 109 |
+
TL;DR
|
| 110 |
+
</div>
|
| 111 |
+
</div>
|
| 112 |
+
<br>
|
| 113 |
+
<div class="row">
|
| 114 |
+
<div class="col">
|
| 115 |
+
<p class="light" style="text-align: left;">
|
| 116 |
+
MMAudio generates synchronized audio given video and/or text inputs.
|
| 117 |
+
</p>
|
| 118 |
+
</div>
|
| 119 |
+
</div>
|
| 120 |
+
|
| 121 |
+
<br>
|
| 122 |
+
<hr>
|
| 123 |
+
<br>
|
| 124 |
+
|
| 125 |
+
<div class="row" style="font-size:32px">
|
| 126 |
+
<div class="col strong">
|
| 127 |
+
Demo
|
| 128 |
+
</div>
|
| 129 |
+
</div>
|
| 130 |
+
<br>
|
| 131 |
+
<div class="row" style="font-size:48px">
|
| 132 |
+
<div class="col strong text-center">
|
| 133 |
+
<a href="video_main.html" style="text-decoration: underline;"><More results></a>
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
<br>
|
| 137 |
+
<div class="video-container" style="text-align: center;">
|
| 138 |
+
<iframe src="https://youtube.com/embed/YElewUT2M4M"></iframe>
|
| 139 |
+
</div>
|
| 140 |
+
|
| 141 |
+
<br>
|
| 142 |
+
|
| 143 |
+
<br><br>
|
| 144 |
+
<br><br>
|
| 145 |
+
|
| 146 |
+
</div>
|
| 147 |
+
|
| 148 |
+
</body>
|
| 149 |
+
</html>
|
docs/style.css
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
body {
|
| 2 |
+
font-family: 'Source Sans 3', sans-serif;
|
| 3 |
+
font-size: 18px;
|
| 4 |
+
margin-left: auto;
|
| 5 |
+
margin-right: auto;
|
| 6 |
+
font-weight: 400;
|
| 7 |
+
height: 100%;
|
| 8 |
+
max-width: 1000px;
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
table {
|
| 12 |
+
width: 100%;
|
| 13 |
+
border-collapse: collapse;
|
| 14 |
+
}
|
| 15 |
+
th, td {
|
| 16 |
+
border: 1px solid #ddd;
|
| 17 |
+
padding: 8px;
|
| 18 |
+
text-align: center;
|
| 19 |
+
}
|
| 20 |
+
th {
|
| 21 |
+
background-color: #f2f2f2;
|
| 22 |
+
}
|
| 23 |
+
video {
|
| 24 |
+
width: 100%;
|
| 25 |
+
height: auto;
|
| 26 |
+
}
|
| 27 |
+
p {
|
| 28 |
+
font-size: 28px;
|
| 29 |
+
}
|
| 30 |
+
h2 {
|
| 31 |
+
font-size: 36px;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
.strong {
|
| 35 |
+
font-weight: 700;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
.light {
|
| 39 |
+
font-weight: 100;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
.heavy {
|
| 43 |
+
font-weight: 900;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
.column {
|
| 47 |
+
float: left;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
a:link,
|
| 51 |
+
a:visited {
|
| 52 |
+
color: #05538f;
|
| 53 |
+
text-decoration: none;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
a:hover {
|
| 57 |
+
color: #63cbdd;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
hr {
|
| 61 |
+
border: 0;
|
| 62 |
+
height: 1px;
|
| 63 |
+
background-image: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.75), rgba(0, 0, 0, 0));
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
.video-container {
|
| 67 |
+
position: relative;
|
| 68 |
+
padding-bottom: 56.25%; /* 16:9 */
|
| 69 |
+
height: 0;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
.video-container iframe {
|
| 73 |
+
position: absolute;
|
| 74 |
+
top: 0;
|
| 75 |
+
left: 0;
|
| 76 |
+
width: 100%;
|
| 77 |
+
height: 100%;
|
| 78 |
+
}
|
docs/style_videos.css
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
body {
|
| 2 |
+
font-family: 'Source Sans 3', sans-serif;
|
| 3 |
+
font-size: 1.5vh;
|
| 4 |
+
font-weight: 400;
|
| 5 |
+
}
|
| 6 |
+
|
| 7 |
+
table {
|
| 8 |
+
width: 100%;
|
| 9 |
+
border-collapse: collapse;
|
| 10 |
+
}
|
| 11 |
+
th, td {
|
| 12 |
+
border: 1px solid #ddd;
|
| 13 |
+
padding: 8px;
|
| 14 |
+
text-align: center;
|
| 15 |
+
}
|
| 16 |
+
th {
|
| 17 |
+
background-color: #f2f2f2;
|
| 18 |
+
}
|
| 19 |
+
video {
|
| 20 |
+
width: 100%;
|
| 21 |
+
height: auto;
|
| 22 |
+
}
|
| 23 |
+
p {
|
| 24 |
+
font-size: 1.5vh;
|
| 25 |
+
font-weight: bold;
|
| 26 |
+
}
|
| 27 |
+
h2 {
|
| 28 |
+
font-size: 2vh;
|
| 29 |
+
font-weight: bold;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
.video-container {
|
| 33 |
+
position: relative;
|
| 34 |
+
padding-bottom: 56.25%; /* 16:9 */
|
| 35 |
+
height: 0;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
.video-container iframe {
|
| 39 |
+
position: absolute;
|
| 40 |
+
top: 0;
|
| 41 |
+
left: 0;
|
| 42 |
+
width: 100%;
|
| 43 |
+
height: 100%;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
.video-header {
|
| 47 |
+
background-color: #f2f2f2;
|
| 48 |
+
text-align: center;
|
| 49 |
+
font-size: 1.5vh;
|
| 50 |
+
font-weight: bold;
|
| 51 |
+
padding: 8px;
|
| 52 |
+
}
|
docs/video_gen.html
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<!-- Google tag (gtag.js) -->
|
| 5 |
+
<script async src="https://www.googletagmanager.com/gtag/js?id=G-0JKBJ3WRJZ"></script>
|
| 6 |
+
<script>
|
| 7 |
+
window.dataLayer = window.dataLayer || [];
|
| 8 |
+
function gtag(){dataLayer.push(arguments);}
|
| 9 |
+
gtag('js', new Date());
|
| 10 |
+
gtag('config', 'G-0JKBJ3WRJZ');
|
| 11 |
+
</script>
|
| 12 |
+
|
| 13 |
+
<link href='https://fonts.googleapis.com/css?family=Source+Sans+Pro' rel='stylesheet' type='text/css'>
|
| 14 |
+
<meta charset="UTF-8">
|
| 15 |
+
<title>MMAudio</title>
|
| 16 |
+
|
| 17 |
+
<link rel="icon" type="image/png" href="images/icon.png">
|
| 18 |
+
|
| 19 |
+
<meta name="viewport" content="width=device-width, initial-scale=1">
|
| 20 |
+
<!-- CSS only -->
|
| 21 |
+
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet"
|
| 22 |
+
integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
|
| 23 |
+
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
|
| 24 |
+
|
| 25 |
+
<link rel="stylesheet" href="style_videos.css">
|
| 26 |
+
</head>
|
| 27 |
+
<body>
|
| 28 |
+
|
| 29 |
+
<div id="moviegen_all">
|
| 30 |
+
<h2 id="moviegen" style="text-align: center;">Comparisons with Movie Gen Audio on Videos Generated by MovieGen</h2>
|
| 31 |
+
<p id="moviegen1" style="overflow: hidden;">
|
| 32 |
+
Example 1: Ice cracking with sharp snapping sound, and metal tool scraping against the ice surface.
|
| 33 |
+
<span style="float: right;"><a href="#index">Back to index</a></span>
|
| 34 |
+
</p>
|
| 35 |
+
|
| 36 |
+
<div class="row g-1">
|
| 37 |
+
<div class="col-sm-6">
|
| 38 |
+
<div class="video-header">Movie Gen Audio</div>
|
| 39 |
+
<div class="video-container">
|
| 40 |
+
<iframe src="https://youtube.com/embed/d7Lb0ihtGcE"></iframe>
|
| 41 |
+
</div>
|
| 42 |
+
</div>
|
| 43 |
+
<div class="col-sm-6">
|
| 44 |
+
<div class="video-header">Ours</div>
|
| 45 |
+
<div class="video-container">
|
| 46 |
+
<iframe src="https://youtube.com/embed/F4JoJ2r2m8U"></iframe>
|
| 47 |
+
</div>
|
| 48 |
+
</div>
|
| 49 |
+
</div>
|
| 50 |
+
<br>
|
| 51 |
+
|
| 52 |
+
<!-- <p id="moviegen2">Example 2: Rhythmic splashing and lapping of water. <span style="float:right;"><a href="#index">Back to index</a></span> </p>
|
| 53 |
+
|
| 54 |
+
<table>
|
| 55 |
+
<thead>
|
| 56 |
+
<tr>
|
| 57 |
+
<th>Movie Gen Audio</th>
|
| 58 |
+
<th>Ours</th>
|
| 59 |
+
</tr>
|
| 60 |
+
</thead>
|
| 61 |
+
<tbody>
|
| 62 |
+
<tr>
|
| 63 |
+
<td width="50%">
|
| 64 |
+
<div class="video-container">
|
| 65 |
+
<iframe src="https://youtube.com/embed/5gQNPK99CIk"></iframe>
|
| 66 |
+
</div>
|
| 67 |
+
</td>
|
| 68 |
+
<td width="50%">
|
| 69 |
+
<div class="video-container">
|
| 70 |
+
<iframe src="https://youtube.com/embed/AbwnTzG-BpA"></iframe>
|
| 71 |
+
</div>
|
| 72 |
+
</td>
|
| 73 |
+
</tr>
|
| 74 |
+
</tbody>
|
| 75 |
+
</table> -->
|
| 76 |
+
|
| 77 |
+
<p id="moviegen2" style="overflow: hidden;">
|
| 78 |
+
Example 2: Rhythmic splashing and lapping of water.
|
| 79 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 80 |
+
</p>
|
| 81 |
+
<div class="row g-1">
|
| 82 |
+
<div class="col-sm-6">
|
| 83 |
+
<div class="video-header">Movie Gen Audio</div>
|
| 84 |
+
<div class="video-container">
|
| 85 |
+
<iframe src="https://youtube.com/embed/5gQNPK99CIk"></iframe>
|
| 86 |
+
</div>
|
| 87 |
+
</div>
|
| 88 |
+
<div class="col-sm-6">
|
| 89 |
+
<div class="video-header">Ours</div>
|
| 90 |
+
<div class="video-container">
|
| 91 |
+
<iframe src="https://youtube.com/embed/AbwnTzG-BpA"></iframe>
|
| 92 |
+
</div>
|
| 93 |
+
</div>
|
| 94 |
+
</div>
|
| 95 |
+
<br>
|
| 96 |
+
|
| 97 |
+
<p id="moviegen3" style="overflow: hidden;">
|
| 98 |
+
Example 3: Shovel scrapes against dry earth.
|
| 99 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 100 |
+
</p>
|
| 101 |
+
<div class="row g-1">
|
| 102 |
+
<div class="col-sm-6">
|
| 103 |
+
<div class="video-header">Movie Gen Audio</div>
|
| 104 |
+
<div class="video-container">
|
| 105 |
+
<iframe src="https://youtube.com/embed/PUKGyEve7XQ"></iframe>
|
| 106 |
+
</div>
|
| 107 |
+
</div>
|
| 108 |
+
<div class="col-sm-6">
|
| 109 |
+
<div class="video-header">Ours</div>
|
| 110 |
+
<div class="video-container">
|
| 111 |
+
<iframe src="https://youtube.com/embed/CNn7i8VNkdc"></iframe>
|
| 112 |
+
</div>
|
| 113 |
+
</div>
|
| 114 |
+
</div>
|
| 115 |
+
<br>
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
<p id="moviegen4" style="overflow: hidden;">
|
| 119 |
+
(Failure case) Example 4: Creamy sound of mashed potatoes being scooped.
|
| 120 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 121 |
+
</p>
|
| 122 |
+
<div class="row g-1">
|
| 123 |
+
<div class="col-sm-6">
|
| 124 |
+
<div class="video-header">Movie Gen Audio</div>
|
| 125 |
+
<div class="video-container">
|
| 126 |
+
<iframe src="https://youtube.com/embed/PJv1zxR9JjQ"></iframe>
|
| 127 |
+
</div>
|
| 128 |
+
</div>
|
| 129 |
+
<div class="col-sm-6">
|
| 130 |
+
<div class="video-header">Ours</div>
|
| 131 |
+
<div class="video-container">
|
| 132 |
+
<iframe src="https://youtube.com/embed/c3-LJ1lNsPQ"></iframe>
|
| 133 |
+
</div>
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
<br>
|
| 137 |
+
|
| 138 |
+
</div>
|
| 139 |
+
|
| 140 |
+
<div id="hunyuan_sora_all">
|
| 141 |
+
|
| 142 |
+
<h2 id="hunyuan" style="text-align: center;">Results on Videos Generated by Hunyuan</h2>
|
| 143 |
+
<p style="overflow: hidden;">
|
| 144 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 145 |
+
</p>
|
| 146 |
+
<div class="row g-1">
|
| 147 |
+
<div class="col-sm-6">
|
| 148 |
+
<div class="video-header">Typing</div>
|
| 149 |
+
<div class="video-container">
|
| 150 |
+
<iframe src="https://youtube.com/embed/8ln_9hhH_nk"></iframe>
|
| 151 |
+
</div>
|
| 152 |
+
</div>
|
| 153 |
+
<div class="col-sm-6">
|
| 154 |
+
<div class="video-header">Water is rushing down a stream and pouring</div>
|
| 155 |
+
<div class="video-container">
|
| 156 |
+
<iframe src="https://youtube.com/embed/5df1FZFQj30"></iframe>
|
| 157 |
+
</div>
|
| 158 |
+
</div>
|
| 159 |
+
</div>
|
| 160 |
+
<div class="row g-1">
|
| 161 |
+
<div class="col-sm-6">
|
| 162 |
+
<div class="video-header">Waves on beach</div>
|
| 163 |
+
<div class="video-container">
|
| 164 |
+
<iframe src="https://youtube.com/embed/7wQ9D5WgpFc"></iframe>
|
| 165 |
+
</div>
|
| 166 |
+
</div>
|
| 167 |
+
<div class="col-sm-6">
|
| 168 |
+
<div class="video-header">Water droplet</div>
|
| 169 |
+
<div class="video-container">
|
| 170 |
+
<iframe src="https://youtube.com/embed/q7M2nsalGjM"></iframe>
|
| 171 |
+
</div>
|
| 172 |
+
</div>
|
| 173 |
+
</div>
|
| 174 |
+
<br>
|
| 175 |
+
|
| 176 |
+
<h2 id="sora" style="text-align: center;">Results on Videos Generated by Sora</h2>
|
| 177 |
+
<p style="overflow: hidden;">
|
| 178 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 179 |
+
</p>
|
| 180 |
+
<div class="row g-1">
|
| 181 |
+
<div class="col-sm-6">
|
| 182 |
+
<div class="video-header">Ships riding waves</div>
|
| 183 |
+
<div class="video-container">
|
| 184 |
+
<iframe src="https://youtube.com/embed/JbgQzHHytk8"></iframe>
|
| 185 |
+
</div>
|
| 186 |
+
</div>
|
| 187 |
+
<div class="col-sm-6">
|
| 188 |
+
<div class="video-header">Train (no text prompt given)</div>
|
| 189 |
+
<div class="video-container">
|
| 190 |
+
<iframe src="https://youtube.com/embed/xOW7zrjpWC8"></iframe>
|
| 191 |
+
</div>
|
| 192 |
+
</div>
|
| 193 |
+
</div>
|
| 194 |
+
<div class="row g-1">
|
| 195 |
+
<div class="col-sm-6">
|
| 196 |
+
<div class="video-header">Seashore (no text prompt given)</div>
|
| 197 |
+
<div class="video-container">
|
| 198 |
+
<iframe src="https://youtube.com/embed/fIuw5Y8ZZ9E"></iframe>
|
| 199 |
+
</div>
|
| 200 |
+
</div>
|
| 201 |
+
<div class="col-sm-6">
|
| 202 |
+
<div class="video-header">Surfing (failure: unprompted music)</div>
|
| 203 |
+
<div class="video-container">
|
| 204 |
+
<iframe src="https://youtube.com/embed/UcSTk-v0M_s"></iframe>
|
| 205 |
+
</div>
|
| 206 |
+
</div>
|
| 207 |
+
</div>
|
| 208 |
+
<br>
|
| 209 |
+
|
| 210 |
+
<div id="mochi_ltx_all">
|
| 211 |
+
<h2 id="mochi" style="text-align: center;">Results on Videos Generated by Mochi 1</h2>
|
| 212 |
+
<p style="overflow: hidden;">
|
| 213 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 214 |
+
</p>
|
| 215 |
+
<div class="row g-1">
|
| 216 |
+
<div class="col-sm-6">
|
| 217 |
+
<div class="video-header">Magical fire and lightning (no text prompt given)</div>
|
| 218 |
+
<div class="video-container">
|
| 219 |
+
<iframe src="https://youtube.com/embed/tTlRZaSMNwY"></iframe>
|
| 220 |
+
</div>
|
| 221 |
+
</div>
|
| 222 |
+
<div class="col-sm-6">
|
| 223 |
+
<div class="video-header">Storm (no text prompt given)</div>
|
| 224 |
+
<div class="video-container">
|
| 225 |
+
<iframe src="https://youtube.com/embed/4hrZTMJUy3w"></iframe>
|
| 226 |
+
</div>
|
| 227 |
+
</div>
|
| 228 |
+
</div>
|
| 229 |
+
<br>
|
| 230 |
+
|
| 231 |
+
<h2 id="ltx" style="text-align: center;">Results on Videos Generated by LTX-Video</h2>
|
| 232 |
+
<p style="overflow: hidden;">
|
| 233 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 234 |
+
</p>
|
| 235 |
+
<div class="row g-1">
|
| 236 |
+
<div class="col-sm-6">
|
| 237 |
+
<div class="video-header">Firewood burning and cracking</div>
|
| 238 |
+
<div class="video-container">
|
| 239 |
+
<iframe src="https://youtube.com/embed/P7_DDpgev0g"></iframe>
|
| 240 |
+
</div>
|
| 241 |
+
</div>
|
| 242 |
+
<div class="col-sm-6">
|
| 243 |
+
<div class="video-header">Waterfall, water splashing</div>
|
| 244 |
+
<div class="video-container">
|
| 245 |
+
<iframe src="https://youtube.com/embed/4MvjceYnIO0"></iframe>
|
| 246 |
+
</div>
|
| 247 |
+
</div>
|
| 248 |
+
</div>
|
| 249 |
+
<br>
|
| 250 |
+
|
| 251 |
+
</div>
|
| 252 |
+
|
| 253 |
+
</body>
|
| 254 |
+
</html>
|
docs/video_main.html
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<!-- Google tag (gtag.js) -->
|
| 5 |
+
<script async src="https://www.googletagmanager.com/gtag/js?id=G-0JKBJ3WRJZ"></script>
|
| 6 |
+
<script>
|
| 7 |
+
window.dataLayer = window.dataLayer || [];
|
| 8 |
+
function gtag(){dataLayer.push(arguments);}
|
| 9 |
+
gtag('js', new Date());
|
| 10 |
+
gtag('config', 'G-0JKBJ3WRJZ');
|
| 11 |
+
</script>
|
| 12 |
+
|
| 13 |
+
<link href='https://fonts.googleapis.com/css?family=Source+Sans+Pro' rel='stylesheet' type='text/css'>
|
| 14 |
+
<meta charset="UTF-8">
|
| 15 |
+
<title>MMAudio</title>
|
| 16 |
+
|
| 17 |
+
<link rel="icon" type="image/png" href="images/icon.png">
|
| 18 |
+
|
| 19 |
+
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
|
| 20 |
+
<!-- CSS only -->
|
| 21 |
+
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet"
|
| 22 |
+
integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
|
| 23 |
+
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.7.1/jquery.min.js"></script>
|
| 24 |
+
|
| 25 |
+
<link rel="stylesheet" href="style_videos.css">
|
| 26 |
+
|
| 27 |
+
<script type="text/javascript">
|
| 28 |
+
$(document).ready(function(){
|
| 29 |
+
$("#content").load("video_gen.html #moviegen_all");
|
| 30 |
+
$("#load_moveigen").click(function(){
|
| 31 |
+
$("#content").load("video_gen.html #moviegen_all");
|
| 32 |
+
});
|
| 33 |
+
$("#load_hunyuan_sora").click(function(){
|
| 34 |
+
$("#content").load("video_gen.html #hunyuan_sora_all");
|
| 35 |
+
});
|
| 36 |
+
$("#load_mochi_ltx").click(function(){
|
| 37 |
+
$("#content").load("video_gen.html #mochi_ltx_all");
|
| 38 |
+
});
|
| 39 |
+
$("#load_vgg1").click(function(){
|
| 40 |
+
$("#content").load("video_vgg.html #vgg1");
|
| 41 |
+
});
|
| 42 |
+
$("#load_vgg2").click(function(){
|
| 43 |
+
$("#content").load("video_vgg.html #vgg2");
|
| 44 |
+
});
|
| 45 |
+
$("#load_vgg3").click(function(){
|
| 46 |
+
$("#content").load("video_vgg.html #vgg3");
|
| 47 |
+
});
|
| 48 |
+
$("#load_vgg4").click(function(){
|
| 49 |
+
$("#content").load("video_vgg.html #vgg4");
|
| 50 |
+
});
|
| 51 |
+
$("#load_vgg5").click(function(){
|
| 52 |
+
$("#content").load("video_vgg.html #vgg5");
|
| 53 |
+
});
|
| 54 |
+
$("#load_vgg6").click(function(){
|
| 55 |
+
$("#content").load("video_vgg.html #vgg6");
|
| 56 |
+
});
|
| 57 |
+
$("#load_vgg_extra").click(function(){
|
| 58 |
+
$("#content").load("video_vgg.html #vgg_extra");
|
| 59 |
+
});
|
| 60 |
+
});
|
| 61 |
+
</script>
|
| 62 |
+
</head>
|
| 63 |
+
<body>
|
| 64 |
+
<h1 id="index" style="text-align: center;">Index</h1>
|
| 65 |
+
<p><b>(Click on the links to load the corresponding videos)</b> <span style="float:right;"><a href="index.html">Back to project page</a></span></p>
|
| 66 |
+
|
| 67 |
+
<ol>
|
| 68 |
+
<li>
|
| 69 |
+
<a href="#" id="load_moveigen">Comparisons with Movie Gen Audio on Videos Generated by MovieGen</a>
|
| 70 |
+
</li>
|
| 71 |
+
<li>
|
| 72 |
+
<a href="#" id="load_hunyuan_sora">Results on Videos Generated by Hunyuan and Sora</a>
|
| 73 |
+
</li>
|
| 74 |
+
<li>
|
| 75 |
+
<a href="#" id="load_mochi_ltx">Results on Videos Generated by Mochi 1 and LTX-Video</a>
|
| 76 |
+
</li>
|
| 77 |
+
<li>
|
| 78 |
+
On VGGSound
|
| 79 |
+
<ol>
|
| 80 |
+
<li><a id='load_vgg1' href="#">Example 1: Wolf howling</a></li>
|
| 81 |
+
<li><a id='load_vgg2' href="#">Example 2: Striking a golf ball</a></li>
|
| 82 |
+
<li><a id='load_vgg3' href="#">Example 3: Hitting a drum</a></li>
|
| 83 |
+
<li><a id='load_vgg4' href="#">Example 4: Dog barking</a></li>
|
| 84 |
+
<li><a id='load_vgg5' href="#">Example 5: Playing a string instrument</a></li>
|
| 85 |
+
<li><a id='load_vgg6' href="#">Example 6: A group of people playing tambourines</a></li>
|
| 86 |
+
<li><a id='load_vgg_extra' href="#">Extra results & failure cases</a></li>
|
| 87 |
+
</ol>
|
| 88 |
+
</li>
|
| 89 |
+
</ol>
|
| 90 |
+
|
| 91 |
+
<div id="content" class="container-fluid">
|
| 92 |
+
|
| 93 |
+
</div>
|
| 94 |
+
<br>
|
| 95 |
+
<br>
|
| 96 |
+
|
| 97 |
+
</body>
|
| 98 |
+
</html>
|
docs/video_vgg.html
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<!-- Google tag (gtag.js) -->
|
| 5 |
+
<script async src="https://www.googletagmanager.com/gtag/js?id=G-0JKBJ3WRJZ"></script>
|
| 6 |
+
<script>
|
| 7 |
+
window.dataLayer = window.dataLayer || [];
|
| 8 |
+
function gtag(){dataLayer.push(arguments);}
|
| 9 |
+
gtag('js', new Date());
|
| 10 |
+
gtag('config', 'G-0JKBJ3WRJZ');
|
| 11 |
+
</script>
|
| 12 |
+
|
| 13 |
+
<link href='https://fonts.googleapis.com/css?family=Source+Sans+Pro' rel='stylesheet' type='text/css'>
|
| 14 |
+
<meta charset="UTF-8">
|
| 15 |
+
<title>MMAudio</title>
|
| 16 |
+
|
| 17 |
+
<meta name="viewport" content="width=device-width, initial-scale=1">
|
| 18 |
+
<!-- CSS only -->
|
| 19 |
+
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet"
|
| 20 |
+
integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
|
| 21 |
+
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
|
| 22 |
+
|
| 23 |
+
<link rel="stylesheet" href="style_videos.css">
|
| 24 |
+
</head>
|
| 25 |
+
<body>
|
| 26 |
+
|
| 27 |
+
<div id="vgg1">
|
| 28 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 29 |
+
<p style="overflow: hidden;">
|
| 30 |
+
Example 1: Wolf howling.
|
| 31 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 32 |
+
</p>
|
| 33 |
+
<div class="row g-1">
|
| 34 |
+
<div class="col-sm-3">
|
| 35 |
+
<div class="video-header">Ground-truth</div>
|
| 36 |
+
<div class="video-container">
|
| 37 |
+
<iframe src="https://youtube.com/embed/9J_V74gqMUA"></iframe>
|
| 38 |
+
</div>
|
| 39 |
+
</div>
|
| 40 |
+
<div class="col-sm-3">
|
| 41 |
+
<div class="video-header">Ours</div>
|
| 42 |
+
<div class="video-container">
|
| 43 |
+
<iframe src="https://youtube.com/embed/P6O8IpjErPc"></iframe>
|
| 44 |
+
</div>
|
| 45 |
+
</div>
|
| 46 |
+
<div class="col-sm-3">
|
| 47 |
+
<div class="video-header">V2A-Mapper</div>
|
| 48 |
+
<div class="video-container">
|
| 49 |
+
<iframe src="https://youtube.com/embed/w-5eyqepvTk"></iframe>
|
| 50 |
+
</div>
|
| 51 |
+
</div>
|
| 52 |
+
<div class="col-sm-3">
|
| 53 |
+
<div class="video-header">FoleyCrafter</div>
|
| 54 |
+
<div class="video-container">
|
| 55 |
+
<iframe src="https://youtube.com/embed/VOLfoZlRkzo"></iframe>
|
| 56 |
+
</div>
|
| 57 |
+
</div>
|
| 58 |
+
</div>
|
| 59 |
+
<div class="row g-1">
|
| 60 |
+
<div class="col-sm-3">
|
| 61 |
+
<div class="video-header">Frieren</div>
|
| 62 |
+
<div class="video-container">
|
| 63 |
+
<iframe src="https://youtube.com/embed/49owKyA5Pa8"></iframe>
|
| 64 |
+
</div>
|
| 65 |
+
</div>
|
| 66 |
+
<div class="col-sm-3">
|
| 67 |
+
<div class="video-header">VATT</div>
|
| 68 |
+
<div class="video-container">
|
| 69 |
+
<iframe src="https://youtube.com/embed/QVtrFgbeGDM"></iframe>
|
| 70 |
+
</div>
|
| 71 |
+
</div>
|
| 72 |
+
<div class="col-sm-3">
|
| 73 |
+
<div class="video-header">V-AURA</div>
|
| 74 |
+
<div class="video-container">
|
| 75 |
+
<iframe src="https://youtube.com/embed/8r0uEfSNjvI"></iframe>
|
| 76 |
+
</div>
|
| 77 |
+
</div>
|
| 78 |
+
<div class="col-sm-3">
|
| 79 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 80 |
+
<div class="video-container">
|
| 81 |
+
<iframe src="https://youtube.com/embed/bn-sLg2qulk"></iframe>
|
| 82 |
+
</div>
|
| 83 |
+
</div>
|
| 84 |
+
</div>
|
| 85 |
+
</div>
|
| 86 |
+
|
| 87 |
+
<div id="vgg2">
|
| 88 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 89 |
+
<p style="overflow: hidden;">
|
| 90 |
+
Example 2: Striking a golf ball.
|
| 91 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 92 |
+
</p>
|
| 93 |
+
|
| 94 |
+
<div class="row g-1">
|
| 95 |
+
<div class="col-sm-3">
|
| 96 |
+
<div class="video-header">Ground-truth</div>
|
| 97 |
+
<div class="video-container">
|
| 98 |
+
<iframe src="https://youtube.com/embed/1hwSu42kkho"></iframe>
|
| 99 |
+
</div>
|
| 100 |
+
</div>
|
| 101 |
+
<div class="col-sm-3">
|
| 102 |
+
<div class="video-header">Ours</div>
|
| 103 |
+
<div class="video-container">
|
| 104 |
+
<iframe src="https://youtube.com/embed/kZibDoDCNxI"></iframe>
|
| 105 |
+
</div>
|
| 106 |
+
</div>
|
| 107 |
+
<div class="col-sm-3">
|
| 108 |
+
<div class="video-header">V2A-Mapper</div>
|
| 109 |
+
<div class="video-container">
|
| 110 |
+
<iframe src="https://youtube.com/embed/jgKfLBLhh7Y"></iframe>
|
| 111 |
+
</div>
|
| 112 |
+
</div>
|
| 113 |
+
<div class="col-sm-3">
|
| 114 |
+
<div class="video-header">FoleyCrafter</div>
|
| 115 |
+
<div class="video-container">
|
| 116 |
+
<iframe src="https://youtube.com/embed/Lfsx8mOPcJo"></iframe>
|
| 117 |
+
</div>
|
| 118 |
+
</div>
|
| 119 |
+
</div>
|
| 120 |
+
<div class="row g-1">
|
| 121 |
+
<div class="col-sm-3">
|
| 122 |
+
<div class="video-header">Frieren</div>
|
| 123 |
+
<div class="video-container">
|
| 124 |
+
<iframe src="https://youtube.com/embed/tz-LpbB0MBc"></iframe>
|
| 125 |
+
</div>
|
| 126 |
+
</div>
|
| 127 |
+
<div class="col-sm-3">
|
| 128 |
+
<div class="video-header">VATT</div>
|
| 129 |
+
<div class="video-container">
|
| 130 |
+
<iframe src="https://youtube.com/embed/RTDUHMi08n4"></iframe>
|
| 131 |
+
</div>
|
| 132 |
+
</div>
|
| 133 |
+
<div class="col-sm-3">
|
| 134 |
+
<div class="video-header">V-AURA</div>
|
| 135 |
+
<div class="video-container">
|
| 136 |
+
<iframe src="https://youtube.com/embed/N-3TDOsPnZQ"></iframe>
|
| 137 |
+
</div>
|
| 138 |
+
</div>
|
| 139 |
+
<div class="col-sm-3">
|
| 140 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 141 |
+
<div class="video-container">
|
| 142 |
+
<iframe src="https://youtube.com/embed/QnsHnLn4gB0"></iframe>
|
| 143 |
+
</div>
|
| 144 |
+
</div>
|
| 145 |
+
</div>
|
| 146 |
+
</div>
|
| 147 |
+
|
| 148 |
+
<div id="vgg3">
|
| 149 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 150 |
+
<p style="overflow: hidden;">
|
| 151 |
+
Example 3: Hitting a drum.
|
| 152 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 153 |
+
</p>
|
| 154 |
+
|
| 155 |
+
<div class="row g-1">
|
| 156 |
+
<div class="col-sm-3">
|
| 157 |
+
<div class="video-header">Ground-truth</div>
|
| 158 |
+
<div class="video-container">
|
| 159 |
+
<iframe src="https://youtube.com/embed/0oeIwq77w0Q"></iframe>
|
| 160 |
+
</div>
|
| 161 |
+
</div>
|
| 162 |
+
<div class="col-sm-3">
|
| 163 |
+
<div class="video-header">Ours</div>
|
| 164 |
+
<div class="video-container">
|
| 165 |
+
<iframe src="https://youtube.com/embed/-UtPV9ohuIM"></iframe>
|
| 166 |
+
</div>
|
| 167 |
+
</div>
|
| 168 |
+
<div class="col-sm-3">
|
| 169 |
+
<div class="video-header">V2A-Mapper</div>
|
| 170 |
+
<div class="video-container">
|
| 171 |
+
<iframe src="https://youtube.com/embed/9yivkgN-zwc"></iframe>
|
| 172 |
+
</div>
|
| 173 |
+
</div>
|
| 174 |
+
<div class="col-sm-3">
|
| 175 |
+
<div class="video-header">FoleyCrafter</div>
|
| 176 |
+
<div class="video-container">
|
| 177 |
+
<iframe src="https://youtube.com/embed/kkCsXPOlBvY"></iframe>
|
| 178 |
+
</div>
|
| 179 |
+
</div>
|
| 180 |
+
</div>
|
| 181 |
+
<div class="row g-1">
|
| 182 |
+
<div class="col-sm-3">
|
| 183 |
+
<div class="video-header">Frieren</div>
|
| 184 |
+
<div class="video-container">
|
| 185 |
+
<iframe src="https://youtube.com/embed/MbNKsVsuvig"></iframe>
|
| 186 |
+
</div>
|
| 187 |
+
</div>
|
| 188 |
+
<div class="col-sm-3">
|
| 189 |
+
<div class="video-header">VATT</div>
|
| 190 |
+
<div class="video-container">
|
| 191 |
+
<iframe src="https://youtube.com/embed/2yYviBjrpBw"></iframe>
|
| 192 |
+
</div>
|
| 193 |
+
</div>
|
| 194 |
+
<div class="col-sm-3">
|
| 195 |
+
<div class="video-header">V-AURA</div>
|
| 196 |
+
<div class="video-container">
|
| 197 |
+
<iframe src="https://youtube.com/embed/9yivkgN-zwc"></iframe>
|
| 198 |
+
</div>
|
| 199 |
+
</div>
|
| 200 |
+
<div class="col-sm-3">
|
| 201 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 202 |
+
<div class="video-container">
|
| 203 |
+
<iframe src="https://youtube.com/embed/6dnyQt4Fuhs"></iframe>
|
| 204 |
+
</div>
|
| 205 |
+
</div>
|
| 206 |
+
</div>
|
| 207 |
+
</div>
|
| 208 |
+
</div>
|
| 209 |
+
|
| 210 |
+
<div id="vgg4">
|
| 211 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 212 |
+
<p style="overflow: hidden;">
|
| 213 |
+
Example 4: Dog barking.
|
| 214 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 215 |
+
</p>
|
| 216 |
+
|
| 217 |
+
<div class="row g-1">
|
| 218 |
+
<div class="col-sm-3">
|
| 219 |
+
<div class="video-header">Ground-truth</div>
|
| 220 |
+
<div class="video-container">
|
| 221 |
+
<iframe src="https://youtube.com/embed/ckaqvTyMYAw"></iframe>
|
| 222 |
+
</div>
|
| 223 |
+
</div>
|
| 224 |
+
<div class="col-sm-3">
|
| 225 |
+
<div class="video-header">Ours</div>
|
| 226 |
+
<div class="video-container">
|
| 227 |
+
<iframe src="https://youtube.com/embed/_aRndFZzZ-I"></iframe>
|
| 228 |
+
</div>
|
| 229 |
+
</div>
|
| 230 |
+
<div class="col-sm-3">
|
| 231 |
+
<div class="video-header">V2A-Mapper</div>
|
| 232 |
+
<div class="video-container">
|
| 233 |
+
<iframe src="https://youtube.com/embed/mNCISP3LBl0"></iframe>
|
| 234 |
+
</div>
|
| 235 |
+
</div>
|
| 236 |
+
<div class="col-sm-3">
|
| 237 |
+
<div class="video-header">FoleyCrafter</div>
|
| 238 |
+
<div class="video-container">
|
| 239 |
+
<iframe src="https://youtube.com/embed/phZBQ3L7foE"></iframe>
|
| 240 |
+
</div>
|
| 241 |
+
</div>
|
| 242 |
+
</div>
|
| 243 |
+
<div class="row g-1">
|
| 244 |
+
<div class="col-sm-3">
|
| 245 |
+
<div class="video-header">Frieren</div>
|
| 246 |
+
<div class="video-container">
|
| 247 |
+
<iframe src="https://youtube.com/embed/Sb5Mg1-ORao"></iframe>
|
| 248 |
+
</div>
|
| 249 |
+
</div>
|
| 250 |
+
<div class="col-sm-3">
|
| 251 |
+
<div class="video-header">VATT</div>
|
| 252 |
+
<div class="video-container">
|
| 253 |
+
<iframe src="https://youtube.com/embed/eHmAGOmtDDg"></iframe>
|
| 254 |
+
</div>
|
| 255 |
+
</div>
|
| 256 |
+
<div class="col-sm-3">
|
| 257 |
+
<div class="video-header">V-AURA</div>
|
| 258 |
+
<div class="video-container">
|
| 259 |
+
<iframe src="https://youtube.com/embed/NEGa3krBrm0"></iframe>
|
| 260 |
+
</div>
|
| 261 |
+
</div>
|
| 262 |
+
<div class="col-sm-3">
|
| 263 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 264 |
+
<div class="video-container">
|
| 265 |
+
<iframe src="https://youtube.com/embed/aO0EAXlwE7A"></iframe>
|
| 266 |
+
</div>
|
| 267 |
+
</div>
|
| 268 |
+
</div>
|
| 269 |
+
</div>
|
| 270 |
+
|
| 271 |
+
<div id="vgg5">
|
| 272 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 273 |
+
<p style="overflow: hidden;">
|
| 274 |
+
Example 5: Playing a string instrument.
|
| 275 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 276 |
+
</p>
|
| 277 |
+
|
| 278 |
+
<div class="row g-1">
|
| 279 |
+
<div class="col-sm-3">
|
| 280 |
+
<div class="video-header">Ground-truth</div>
|
| 281 |
+
<div class="video-container">
|
| 282 |
+
<iframe src="https://youtube.com/embed/KP1QhWauIOc"></iframe>
|
| 283 |
+
</div>
|
| 284 |
+
</div>
|
| 285 |
+
<div class="col-sm-3">
|
| 286 |
+
<div class="video-header">Ours</div>
|
| 287 |
+
<div class="video-container">
|
| 288 |
+
<iframe src="https://youtube.com/embed/ovaJhWSquYE"></iframe>
|
| 289 |
+
</div>
|
| 290 |
+
</div>
|
| 291 |
+
<div class="col-sm-3">
|
| 292 |
+
<div class="video-header">V2A-Mapper</div>
|
| 293 |
+
<div class="video-container">
|
| 294 |
+
<iframe src="https://youtube.com/embed/N723FS9lcy8"></iframe>
|
| 295 |
+
</div>
|
| 296 |
+
</div>
|
| 297 |
+
<div class="col-sm-3">
|
| 298 |
+
<div class="video-header">FoleyCrafter</div>
|
| 299 |
+
<div class="video-container">
|
| 300 |
+
<iframe src="https://youtube.com/embed/t0N4ZAAXo58"></iframe>
|
| 301 |
+
</div>
|
| 302 |
+
</div>
|
| 303 |
+
</div>
|
| 304 |
+
<div class="row g-1">
|
| 305 |
+
<div class="col-sm-3">
|
| 306 |
+
<div class="video-header">Frieren</div>
|
| 307 |
+
<div class="video-container">
|
| 308 |
+
<iframe src="https://youtube.com/embed/8YSRs03QNNA"></iframe>
|
| 309 |
+
</div>
|
| 310 |
+
</div>
|
| 311 |
+
<div class="col-sm-3">
|
| 312 |
+
<div class="video-header">VATT</div>
|
| 313 |
+
<div class="video-container">
|
| 314 |
+
<iframe src="https://youtube.com/embed/vOpMz55J1kY"></iframe>
|
| 315 |
+
</div>
|
| 316 |
+
</div>
|
| 317 |
+
<div class="col-sm-3">
|
| 318 |
+
<div class="video-header">V-AURA</div>
|
| 319 |
+
<div class="video-container">
|
| 320 |
+
<iframe src="https://youtube.com/embed/9JHC75vr9h0"></iframe>
|
| 321 |
+
</div>
|
| 322 |
+
</div>
|
| 323 |
+
<div class="col-sm-3">
|
| 324 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 325 |
+
<div class="video-container">
|
| 326 |
+
<iframe src="https://youtube.com/embed/9w0JckNzXmY"></iframe>
|
| 327 |
+
</div>
|
| 328 |
+
</div>
|
| 329 |
+
</div>
|
| 330 |
+
</div>
|
| 331 |
+
|
| 332 |
+
<div id="vgg6">
|
| 333 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 334 |
+
<p style="overflow: hidden;">
|
| 335 |
+
Example 6: A group of people playing tambourines.
|
| 336 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 337 |
+
</p>
|
| 338 |
+
|
| 339 |
+
<div class="row g-1">
|
| 340 |
+
<div class="col-sm-3">
|
| 341 |
+
<div class="video-header">Ground-truth</div>
|
| 342 |
+
<div class="video-container">
|
| 343 |
+
<iframe src="https://youtube.com/embed/mx6JLxzUkRc"></iframe>
|
| 344 |
+
</div>
|
| 345 |
+
</div>
|
| 346 |
+
<div class="col-sm-3">
|
| 347 |
+
<div class="video-header">Ours</div>
|
| 348 |
+
<div class="video-container">
|
| 349 |
+
<iframe src="https://youtube.com/embed/oLirHhP9Su8"></iframe>
|
| 350 |
+
</div>
|
| 351 |
+
</div>
|
| 352 |
+
<div class="col-sm-3">
|
| 353 |
+
<div class="video-header">V2A-Mapper</div>
|
| 354 |
+
<div class="video-container">
|
| 355 |
+
<iframe src="https://youtube.com/embed/HkLkHMqptv0"></iframe>
|
| 356 |
+
</div>
|
| 357 |
+
</div>
|
| 358 |
+
<div class="col-sm-3">
|
| 359 |
+
<div class="video-header">FoleyCrafter</div>
|
| 360 |
+
<div class="video-container">
|
| 361 |
+
<iframe src="https://youtube.com/embed/rpHiiODjmNU"></iframe>
|
| 362 |
+
</div>
|
| 363 |
+
</div>
|
| 364 |
+
</div>
|
| 365 |
+
<div class="row g-1">
|
| 366 |
+
<div class="col-sm-3">
|
| 367 |
+
<div class="video-header">Frieren</div>
|
| 368 |
+
<div class="video-container">
|
| 369 |
+
<iframe src="https://youtube.com/embed/1mVD3fJ0LpM"></iframe>
|
| 370 |
+
</div>
|
| 371 |
+
</div>
|
| 372 |
+
<div class="col-sm-3">
|
| 373 |
+
<div class="video-header">VATT</div>
|
| 374 |
+
<div class="video-container">
|
| 375 |
+
<iframe src="https://youtube.com/embed/yjVFnJiEJlw"></iframe>
|
| 376 |
+
</div>
|
| 377 |
+
</div>
|
| 378 |
+
<div class="col-sm-3">
|
| 379 |
+
<div class="video-header">V-AURA</div>
|
| 380 |
+
<div class="video-container">
|
| 381 |
+
<iframe src="https://youtube.com/embed/neVeMSWtRkU"></iframe>
|
| 382 |
+
</div>
|
| 383 |
+
</div>
|
| 384 |
+
<div class="col-sm-3">
|
| 385 |
+
<div class="video-header">Seeing and Hearing</div>
|
| 386 |
+
<div class="video-container">
|
| 387 |
+
<iframe src="https://youtube.com/embed/EUE7YwyVWz8"></iframe>
|
| 388 |
+
</div>
|
| 389 |
+
</div>
|
| 390 |
+
</div>
|
| 391 |
+
</div>
|
| 392 |
+
|
| 393 |
+
<div id="vgg_extra">
|
| 394 |
+
<h2 style="text-align: center;">Comparisons with state-of-the-art methods in VGGSound</h2>
|
| 395 |
+
<p style="overflow: hidden;">
|
| 396 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 397 |
+
</p>
|
| 398 |
+
|
| 399 |
+
<div class="row g-1">
|
| 400 |
+
<div class="col-sm-3">
|
| 401 |
+
<div class="video-header">Moving train</div>
|
| 402 |
+
<div class="video-container">
|
| 403 |
+
<iframe src="https://youtube.com/embed/Ta6H45rBzJc"></iframe>
|
| 404 |
+
</div>
|
| 405 |
+
</div>
|
| 406 |
+
<div class="col-sm-3">
|
| 407 |
+
<div class="video-header">Water splashing</div>
|
| 408 |
+
<div class="video-container">
|
| 409 |
+
<iframe src="https://youtube.com/embed/hl6AtgHXpb4"></iframe>
|
| 410 |
+
</div>
|
| 411 |
+
</div>
|
| 412 |
+
<div class="col-sm-3">
|
| 413 |
+
<div class="video-header">Skateboarding</div>
|
| 414 |
+
<div class="video-container">
|
| 415 |
+
<iframe src="https://youtube.com/embed/n4sCNi_9buI"></iframe>
|
| 416 |
+
</div>
|
| 417 |
+
</div>
|
| 418 |
+
<div class="col-sm-3">
|
| 419 |
+
<div class="video-header">Synchronized clapping</div>
|
| 420 |
+
<div class="video-container">
|
| 421 |
+
<iframe src="https://youtube.com/embed/oxexfpLn7FE"></iframe>
|
| 422 |
+
</div>
|
| 423 |
+
</div>
|
| 424 |
+
</div>
|
| 425 |
+
|
| 426 |
+
<br><br>
|
| 427 |
+
|
| 428 |
+
<div id="extra-failure">
|
| 429 |
+
<h2 style="text-align: center;">Failure cases</h2>
|
| 430 |
+
<p style="overflow: hidden;">
|
| 431 |
+
<span style="float:right;"><a href="#index">Back to index</a></span>
|
| 432 |
+
</p>
|
| 433 |
+
|
| 434 |
+
<div class="row g-1">
|
| 435 |
+
<div class="col-sm-6">
|
| 436 |
+
<div class="video-header">Human speech</div>
|
| 437 |
+
<div class="video-container">
|
| 438 |
+
<iframe src="https://youtube.com/embed/nx0CyrDu70Y"></iframe>
|
| 439 |
+
</div>
|
| 440 |
+
</div>
|
| 441 |
+
<div class="col-sm-6">
|
| 442 |
+
<div class="video-header">Unfamiliar vision input</div>
|
| 443 |
+
<div class="video-container">
|
| 444 |
+
<iframe src="https://youtube.com/embed/hfnAqmK3X7w"></iframe>
|
| 445 |
+
</div>
|
| 446 |
+
</div>
|
| 447 |
+
</div>
|
| 448 |
+
</div>
|
| 449 |
+
</div>
|
| 450 |
+
|
| 451 |
+
</body>
|
| 452 |
+
</html>
|
filter_dataset/av_align.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AV-Align Metric: Audio-Video Alignment Evaluation
|
| 3 |
+
|
| 4 |
+
AV-Align is a metric for evaluating the alignment between audio and video modalities in multimedia data.
|
| 5 |
+
It assesses synchronization by detecting audio and video peaks and calculating their Intersection over Union (IoU).
|
| 6 |
+
A higher IoU score indicates better alignment.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
- Provide a folder of video files as input.
|
| 10 |
+
- The script calculates the AV-Align score for the set of videos.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
import argparse
|
| 15 |
+
import glob
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
import cv2
|
| 19 |
+
import librosa
|
| 20 |
+
import librosa.display
|
| 21 |
+
|
| 22 |
+
import multiprocessing
|
| 23 |
+
from tqdm import tqdm
|
| 24 |
+
import pandas as pd
|
| 25 |
+
|
| 26 |
+
# Function to extract frames from a video file
|
| 27 |
+
def extract_frames(video_path):
    """Read every frame of a video into memory.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        tuple: (frames, frame_rate) where frames is a list of BGR frames
        as returned by OpenCV and frame_rate is the container FPS.

    Raises:
        ValueError: If OpenCV cannot open the file.
    """
    capture = cv2.VideoCapture(video_path)
    frame_rate = capture.get(cv2.CAP_PROP_FPS)

    if not capture.isOpened():
        raise ValueError(f"Error: Unable to open the video file. Wrong video is {video_path}")

    collected = []
    while True:
        ok, image = capture.read()
        if not ok:
            break
        collected.append(image)
    capture.release()
    return collected, frame_rate
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Function to detect audio peaks using the Onset Detection algorithm
|
| 56 |
+
def detect_audio_peaks(audio_file):
    """Detect audio peaks (onsets) in an audio file.

    Args:
        audio_file (str): Path to the audio file.

    Returns:
        Onset times in seconds, as produced by librosa's onset detector.
    """
    waveform, sample_rate = librosa.load(audio_file)
    # Onset-strength envelope, then pick onset events from it.
    envelope = librosa.onset.onset_strength(y=waveform, sr=sample_rate)
    peak_frames = librosa.onset.onset_detect(onset_envelope=envelope, sr=sample_rate)
    return librosa.frames_to_time(peak_frames, sr=sample_rate)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Function to find local maxima in a list
|
| 76 |
+
def find_local_max_indexes(arr, fps):
    """Return the times (seconds) of strict interior local maxima of *arr*.

    An element is a peak when it is strictly greater than both of its
    neighbours; the endpoints are never peaks. Index i maps to time i / fps.

    Args:
        arr (list): Values to scan.
        fps (float): Frames per second, used to convert indexes to time.

    Returns:
        list: Peak times in seconds, in increasing order.
    """
    return [
        idx / fps
        for idx in range(1, len(arr) - 1)
        if arr[idx - 1] < arr[idx] > arr[idx + 1]
    ]
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# Function to detect video peaks using Optical Flow
|
| 98 |
+
def detect_video_peaks(frames, fps):
    """Detect video peaks using Optical Flow.

    The flow trajectory has one entry per frame: the first flow value is
    duplicated so the list lines up with the frame list. Requires at least
    two frames.

    Args:
        frames (list): List of video frames.
        fps (float): Frame rate of the video.

    Returns:
        flow_trajectory (list): Optical-flow magnitude for each frame.
        video_peaks (list): Times (in seconds) where video peaks occur.
    """
    flows = [compute_of(frames[i - 1], frames[i]) for i in range(1, len(frames))]
    # Reuse the (0, 1) flow instead of recomputing it (the original called
    # compute_of on the first frame pair twice).
    flow_trajectory = [flows[0]] + flows
    video_peaks = find_local_max_indexes(flow_trajectory, fps)
    return flow_trajectory, video_peaks
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Function to compute the optical flow magnitude between two frames
|
| 116 |
+
def compute_of(img1, img2):
    """Average dense optical-flow magnitude between two consecutive frames.

    Args:
        img1 (numpy.ndarray): First video frame (BGR).
        img2 (numpy.ndarray): Second video frame (BGR).

    Returns:
        float: Mean Farneback flow magnitude over all pixels.
    """
    gray_a = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray_b = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    flow_field = cv2.calcOpticalFlowFarneback(gray_a, gray_b, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # Per-pixel vector magnitude, averaged over the whole frame.
    flow_norm = cv2.magnitude(flow_field[..., 0], flow_field[..., 1])
    return cv2.mean(flow_norm)[0]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Function to calculate Intersection over Union (IoU) for audio and video peaks
|
| 139 |
+
def calc_intersection_over_union(audio_peaks, video_peaks, fps):
    """Calculate Intersection over Union (IoU) between audio and video peaks.

    An audio peak is matched when it lies within one frame duration
    (1/fps seconds) of some video peak; each audio peak matches at most
    once (inner loop breaks on the first hit).

    Args:
        audio_peaks (list): Audio peak times in seconds.
        video_peaks (list): Video peak times in seconds.
        fps (float): Frame rate of the video.

    Returns:
        float: IoU score in [0, 1]; 0.0 when both peak lists are empty
        (the original raised ZeroDivisionError in that case).
    """
    if not len(audio_peaks) and not len(video_peaks):
        return 0.0
    window = 1 / fps  # loop-invariant matching tolerance, hoisted
    intersection_length = 0
    for audio_peak in audio_peaks:
        for video_peak in video_peaks:
            if video_peak - window < audio_peak < video_peak + window:
                intersection_length += 1
                break
    return intersection_length / (len(audio_peaks) + len(video_peaks) - intersection_length)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def multi_wrapper(args):
    """Unpack one (video_path, audio_path) tuple for Pool.imap."""
    video_path, audio_path = args
    return wrapper_cal_av_score(video_path, audio_path)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def wrapper_cal_av_score(video_path, audio_path):
    """Compute the AV-Align score for one (video, audio) pair.

    Args:
        video_path (str): Path to the video file.
        audio_path (str): Path to the matching audio file.

    Returns:
        dict: {'video_id': basename without extension, 'score': IoU score,
        or -1 if the computation failed}.
    """
    video_id = os.path.basename(video_path)[:-4]  # strip '.mp4'/'.wav'
    try:
        frames, fps = extract_frames(video_path)
        audio_peaks = detect_audio_peaks(audio_path)
        flow_trajectory, video_peaks = detect_video_peaks(frames, fps)
        score = calc_intersection_over_union(audio_peaks, video_peaks, fps)
        print(f'Calculated AV Align score for {os.path.basename(video_path)}')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any per-file failure is recorded as -1.
        score = -1
        print(f'Cannot Calculate AV Align score for {os.path.basename(video_path)}')

    return {
        'video_id': video_id,
        'score': score,
    }
|
| 181 |
+
|
| 182 |
+
if __name__ == "__main__":

    threshold = 0.2  # kept for parity with the downstream filtering script (unused here)
    video_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos'
    audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/audios_vggsound'
    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/filter_dataset'

    audio_file_list = os.listdir(audio_dir)

    # Pair every audio file with the same-id .mp4 video.
    video_audio_list = []
    for audio_file in tqdm(audio_file_list):
        video_id = audio_file[:-4]
        video_path = os.path.join(video_dir, video_id + '.mp4')
        audio_path = os.path.join(audio_dir, audio_file)
        video_audio_list.append((video_path, audio_path))

    cores = multiprocessing.cpu_count()
    # Context manager terminates/joins the workers even on error
    # (the original created the Pool and never closed it).
    with multiprocessing.Pool(processes=cores) as pool:
        results = list(tqdm(pool.imap(multi_wrapper, video_audio_list), total=len(video_audio_list)))

    output_df = pd.DataFrame(results)
    output_df.to_csv(os.path.join(output_dir, 'vggsound_av_align_score.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
filter_dataset/av_align_hpc.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AV-Align Metric: Audio-Video Alignment Evaluation
|
| 3 |
+
|
| 4 |
+
AV-Align is a metric for evaluating the alignment between audio and video modalities in multimedia data.
|
| 5 |
+
It assesses synchronization by detecting audio and video peaks and calculating their Intersection over Union (IoU).
|
| 6 |
+
A higher IoU score indicates better alignment.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
- Provide a folder of video files as input.
|
| 10 |
+
- The script calculates the AV-Align score for the set of videos.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
import argparse
|
| 15 |
+
import glob
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
import cv2
|
| 19 |
+
import librosa
|
| 20 |
+
import librosa.display
|
| 21 |
+
|
| 22 |
+
import multiprocessing
|
| 23 |
+
from tqdm import tqdm
|
| 24 |
+
import pandas as pd
|
| 25 |
+
|
| 26 |
+
# Function to extract frames from a video file
|
| 27 |
+
def extract_frames(video_path):
    """Read all frames of a video file.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        tuple: (frames, frame_rate) — BGR frames from OpenCV and the FPS.

    Raises:
        ValueError: If the file cannot be opened.
    """
    reader = cv2.VideoCapture(video_path)
    fps = reader.get(cv2.CAP_PROP_FPS)

    if not reader.isOpened():
        raise ValueError(f"Error: Unable to open the video file. Wrong video is {video_path}")

    all_frames = []
    while True:
        success, frame = reader.read()
        if not success:
            break
        all_frames.append(frame)
    reader.release()
    return all_frames, fps
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Function to detect audio peaks using the Onset Detection algorithm
|
| 56 |
+
def detect_audio_peaks(audio_file):
    """Return onset (peak) times, in seconds, for an audio file.

    Args:
        audio_file (str): Path to the audio file.

    Returns:
        Onset times in seconds from librosa's onset detector.
    """
    signal, rate = librosa.load(audio_file)
    strength = librosa.onset.onset_strength(y=signal, sr=rate)
    frames = librosa.onset.onset_detect(onset_envelope=strength, sr=rate)
    return librosa.frames_to_time(frames, sr=rate)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Function to find local maxima in a list
|
| 76 |
+
def find_local_max_indexes(arr, fps):
    """Times (seconds) of strict interior local maxima; index i maps to i / fps.

    Args:
        arr (list): Values to scan for peaks.
        fps (float): Frames per second.

    Returns:
        list: Peak times in seconds, increasing.
    """
    peak_times = []
    # Walk overlapping (prev, cur, next) triples; endpoints are never peaks.
    for i, (prev_v, cur_v, next_v) in enumerate(zip(arr, arr[1:], arr[2:]), start=1):
        if prev_v < cur_v > next_v:
            peak_times.append(i / fps)
    return peak_times
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# Function to detect video peaks using Optical Flow
|
| 98 |
+
def detect_video_peaks(frames, fps):
    """Detect video peaks using Optical Flow.

    The trajectory has one entry per frame: the first flow value is
    duplicated to align with the frame list. Requires >= 2 frames.

    Args:
        frames (list): List of video frames.
        fps (float): Frame rate of the video.

    Returns:
        flow_trajectory (list): Optical-flow magnitude per frame.
        video_peaks (list): Times (seconds) where video peaks occur.
    """
    flows = [compute_of(frames[i - 1], frames[i]) for i in range(1, len(frames))]
    # Reuse the (0, 1) flow instead of computing it twice as before.
    flow_trajectory = [flows[0]] + flows
    video_peaks = find_local_max_indexes(flow_trajectory, fps)
    return flow_trajectory, video_peaks
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Function to compute the optical flow magnitude between two frames
|
| 116 |
+
def compute_of(img1, img2):
    """Mean Farneback optical-flow magnitude between two frames.

    Args:
        img1 (numpy.ndarray): First video frame (BGR).
        img2 (numpy.ndarray): Second video frame (BGR).

    Returns:
        float: Average flow magnitude over the frame.
    """
    first_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    second_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    motion = cv2.calcOpticalFlowFarneback(first_gray, second_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    norms = cv2.magnitude(motion[..., 0], motion[..., 1])
    return cv2.mean(norms)[0]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Function to calculate Intersection over Union (IoU) for audio and video peaks
|
| 139 |
+
def calc_intersection_over_union(audio_peaks, video_peaks, fps):
    """Calculate Intersection over Union (IoU) between audio and video peaks.

    An audio peak is matched when it lies within one frame duration
    (1/fps seconds) of some video peak; each audio peak matches at most once.

    Args:
        audio_peaks (list): Audio peak times in seconds.
        video_peaks (list): Video peak times in seconds.
        fps (float): Frame rate of the video.

    Returns:
        float: IoU in [0, 1]; 0.0 when both lists are empty (the original
        raised ZeroDivisionError in that case).
    """
    if not len(audio_peaks) and not len(video_peaks):
        return 0.0
    window = 1 / fps  # hoisted loop-invariant tolerance
    intersection_length = 0
    for audio_peak in audio_peaks:
        for video_peak in video_peaks:
            if video_peak - window < audio_peak < video_peak + window:
                intersection_length += 1
                break
    return intersection_length / (len(audio_peaks) + len(video_peaks) - intersection_length)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def multi_wrapper(args):
    """Adapter for Pool.imap: unpack the (video_path, audio_path) pair."""
    v_path, a_path = args
    return wrapper_cal_av_score(v_path, a_path)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def wrapper_cal_av_score(video_path, audio_path):
    """Compute the AV-Align score for one (video, audio) pair.

    Args:
        video_path (str): Path to the video file.
        audio_path (str): Path to the matching audio file.

    Returns:
        dict: {'video_id': basename without extension, 'score': IoU score,
        or -1 if the computation failed}.
    """
    video_id = os.path.basename(video_path)[:-4]  # strip 3-char extension + dot
    try:
        frames, fps = extract_frames(video_path)
        audio_peaks = detect_audio_peaks(audio_path)
        flow_trajectory, video_peaks = detect_video_peaks(frames, fps)
        score = calc_intersection_over_union(audio_peaks, video_peaks, fps)
        print(f'Calculated AV Align score for {os.path.basename(video_path)}')
    except Exception:
        # Narrowed from a bare `except:` so interrupt signals propagate;
        # any per-file failure is recorded as -1.
        score = -1
        print(f'Cannot Calculate AV Align score for {os.path.basename(video_path)}')

    return {
        'video_id': video_id,
        'score': score,
    }
|
| 181 |
+
|
| 182 |
+
if __name__ == "__main__":

    threshold = 0.2  # kept for parity with the downstream filtering script (unused here)
    video_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos'
    audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/audios_vggsound'
    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/filter_dataset'

    audio_file_list = os.listdir(audio_dir)

    # Pair every audio file with the same-id .mp4 video.
    video_audio_list = []
    for audio_file in tqdm(audio_file_list):
        video_id = audio_file[:-4]
        video_path = os.path.join(video_dir, video_id + '.mp4')
        audio_path = os.path.join(audio_dir, audio_file)
        video_audio_list.append((video_path, audio_path))

    cores = multiprocessing.cpu_count()
    # Context manager terminates/joins the workers even on error
    # (the original created the Pool and never closed it).
    with multiprocessing.Pool(processes=cores) as pool:
        results = list(tqdm(pool.imap(multi_wrapper, video_audio_list), total=len(video_audio_list)))

    output_df = pd.DataFrame(results)
    output_df.to_csv(os.path.join(output_dir, 'vggsound_av_align_score_latest.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
filter_dataset/extract_audio.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import moviepy
|
| 3 |
+
from moviepy.audio.AudioClip import AudioArrayClip
|
| 4 |
+
from moviepy.editor import VideoFileClip
|
| 5 |
+
import numpy as np
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
video_root = '/inspire/hdd/global_user/zhoutao-240108120126/Movie-Gen-Audio/MovieGenAudioBenchSfx/video_with_audio'
audio_root = '/inspire/hdd/global_user/zhoutao-240108120126/Movie-Gen-Audio/MovieGenAudioBenchSfx/audio-only'

# Create the output directory on first run.
if not os.path.exists(audio_root):
    os.makedirs(audio_root)

v_name_list = os.listdir(video_root)
for v_name in v_name_list:
    v_id = v_name[:-4]  # strip the video extension
    v_path = os.path.join(video_root, v_name)
    audio_name = v_id + '.wav'
    dst_path = os.path.join(audio_root, audio_name)
    try:
        video = VideoFileClip(v_path)
        try:
            audio = video.audio
            audio.write_audiofile(dst_path, fps=audio.fps)
        finally:
            # Release the ffmpeg reader handles (the original leaked one per file).
            video.close()
        print("finish video id: " + audio_name)
    except Exception:
        # Narrowed from a bare `except:`. The original message concatenated
        # '{}' instead of formatting, printing the literal braces.
        print(f'Failed video id: {audio_name}')
|
filter_dataset/filter_vggsound.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
# Input: per-video AV-Align scores produced by filter_dataset/av_align.py.
av_align_tsv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/filter_dataset/vggsound_av_align_score.tsv'
# VGGSound split lists to be filtered.
train_tsv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-train-filtered.tsv'
val_tsv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-val-filtered.tsv'
test_tsv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-test-filtered.tsv'
# Output directory for the filtered split tsvs.
filtered_tsv_saved_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/filter_dataset/filtered_vggsound'

# Minimum AV-Align score for a video to be kept.
THRESHOLD = 0.2  # can be changed accordingly
|
| 11 |
+
|
| 12 |
+
def filter_vggsound(tsv_path, saved_video_ids, filtered_tsv_saved_dir, saved_tsv_name=''):
    """Write a filtered copy of a VGGSound split tsv keeping only allowed ids.

    Args:
        tsv_path (str): Input split tsv with 'id' and 'label' columns.
        saved_video_ids: Collection of video ids that passed the threshold.
        filtered_tsv_saved_dir (str): Directory the filtered tsv is written to.
        saved_tsv_name (str): File name of the filtered tsv.
    """
    # Set membership is O(1); the caller passes a plain list, which made the
    # original loop O(records * ids).
    allowed_ids = set(saved_video_ids)
    df_list = pd.read_csv(tsv_path, sep='\t', dtype={'id': str}).to_dict('records')
    saved_id_label_list = []
    for record in tqdm(df_list):
        video_id = record['id']  # renamed: `id` shadowed the builtin
        if video_id in allowed_ids:
            saved_id_label_list.append({
                'id': video_id,
                'label': record['label'],
            })

    output_df = pd.DataFrame(saved_id_label_list)
    output_df.to_csv(os.path.join(filtered_tsv_saved_dir, saved_tsv_name), sep='\t', index=False)
    print(f'Saved to: {saved_tsv_name}')
|
| 28 |
+
|
| 29 |
+
# read the tsv for subset information
|
| 30 |
+
df_list = pd.read_csv(av_align_tsv_path, sep='\t', dtype={'video_id': str}).to_dict('records')
|
| 31 |
+
saved_video_ids = []
|
| 32 |
+
for record in tqdm(df_list):
|
| 33 |
+
id = record['video_id']
|
| 34 |
+
score = record['score']
|
| 35 |
+
if score > THRESHOLD:
|
| 36 |
+
saved_video_ids.append(id)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
filter_vggsound(train_tsv_path, saved_video_ids, filtered_tsv_saved_dir, saved_tsv_name='train_filter_av_align.tsv')
|
| 40 |
+
filter_vggsound(val_tsv_path, saved_video_ids, filtered_tsv_saved_dir, saved_tsv_name='val_filter_av_align.tsv')
|
| 41 |
+
filter_vggsound(test_tsv_path, saved_video_ids, filtered_tsv_saved_dir, saved_tsv_name='test_filter_av_align.tsv')
|
| 42 |
+
|
| 43 |
+
|
filter_dataset/get_testing_audio_gt.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import shutil
|
| 5 |
+
|
| 6 |
+
full_audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos' # audios_vggsound
vgg_test_tsv = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-test-filtered.tsv'
test_audio_gt_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/test_videos_new' # gt_audio_test

# Ensure the destination exists (the original crashed when it was missing).
os.makedirs(test_audio_gt_dir, exist_ok=True)

# Set membership: the original tested `x in list` once per record (O(n) each).
audio_files_list = set(os.listdir(full_audio_dir))

df_list = pd.read_csv(vgg_test_tsv, sep='\t', dtype={'id': str}).to_dict('records')
count = 0
for record in tqdm(df_list):
    video_id = record['id']
    if f'{video_id}.mp4' in audio_files_list:
        current_audio_path = os.path.join(full_audio_dir, f'{video_id}.mp4')
        target_audio_path = os.path.join(test_audio_gt_dir, f'{video_id}.mp4')
        shutil.copyfile(current_audio_path, target_audio_path)
        count += 1

print(f'Moved {count} files !')
filter_dataset/get_train_audio_gt.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import shutil
|
| 5 |
+
|
| 6 |
+
full_audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos' # audios_vggsound
vgg_test_tsv = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-train-filtered.tsv'
test_audio_gt_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/train_videos_new' # copy val subset

# Ensure the destination exists (the original crashed when it was missing).
os.makedirs(test_audio_gt_dir, exist_ok=True)

# Set membership: the original tested `x in list` once per record (O(n) each).
audio_files_list = set(os.listdir(full_audio_dir))

df_list = pd.read_csv(vgg_test_tsv, sep='\t', dtype={'id': str}).to_dict('records')
count = 0
for record in tqdm(df_list):
    video_id = record['id']
    if f'{video_id}.mp4' in audio_files_list:
        current_audio_path = os.path.join(full_audio_dir, f'{video_id}.mp4')
        target_audio_path = os.path.join(test_audio_gt_dir, f'{video_id}.mp4')
        shutil.copyfile(current_audio_path, target_audio_path)
        count += 1

print(f'Moved {count} files !')
filter_dataset/get_val_audio_gt.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import shutil
|
| 5 |
+
|
| 6 |
+
full_audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos' # audios_vggsound
vgg_test_tsv = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/sets/vgg-val-filtered.tsv'
test_audio_gt_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/val_videos_new' # copy val subset

# Ensure the destination exists (the original crashed when it was missing).
os.makedirs(test_audio_gt_dir, exist_ok=True)

# Set membership: the original tested `x in list` once per record (O(n) each).
audio_files_list = set(os.listdir(full_audio_dir))

df_list = pd.read_csv(vgg_test_tsv, sep='\t', dtype={'id': str}).to_dict('records')
count = 0
for record in tqdm(df_list):
    video_id = record['id']
    if f'{video_id}.mp4' in audio_files_list:
        current_audio_path = os.path.join(full_audio_dir, f'{video_id}.mp4')
        target_audio_path = os.path.join(test_audio_gt_dir, f'{video_id}.mp4')
        shutil.copyfile(current_audio_path, target_audio_path)
        count += 1

print(f'Moved {count} files !')
gradio_demo.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from argparse import ArgumentParser
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from fractions import Fraction
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import gradio as gr
|
| 8 |
+
import torch
|
| 9 |
+
import torchaudio
|
| 10 |
+
|
| 11 |
+
from mmaudio.eval_utils import (ModelConfig, VideoInfo, all_model_cfg, generate, load_image,
|
| 12 |
+
load_video, make_video, setup_eval_logging)
|
| 13 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 14 |
+
from mmaudio.model.networks import MMAudio, get_my_mmaudio
|
| 15 |
+
from mmaudio.model.sequence_config import SequenceConfig
|
| 16 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 17 |
+
|
| 18 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 19 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 20 |
+
|
| 21 |
+
log = logging.getLogger()
|
| 22 |
+
|
| 23 |
+
# Pick the best available accelerator; fall back to CPU with a warning.
if torch.cuda.is_available():
    device = 'cuda'
elif torch.backends.mps.is_available():
    device = 'mps'
else:
    device = 'cpu'
    log.warning('CUDA/MPS are not available, running on CPU')
dtype = torch.bfloat16
|
| 31 |
+
|
| 32 |
+
# Model variant served by this demo; fetches checkpoints on first run.
model: ModelConfig = all_model_cfg['large_44k_v2']
model.download_if_needed()
output_dir = Path('./output/gradio')

setup_eval_logging()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def get_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]:
    """Load the MMAudio network and its feature extractors onto the global device.

    Returns:
        A tuple of (network, feature-extraction utilities, sequence config).
    """
    seq_cfg = model.seq_cfg

    net: MMAudio = get_my_mmaudio(model.model_name).to(device, dtype).eval()
    net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True))
    log.info(f'Loaded weights from {model.model_path}')

    # The VAE encoder is not needed for generation, so it is skipped on load.
    feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
                                  synchformer_ckpt=model.synchformer_ckpt,
                                  enable_conditions=True,
                                  mode=model.mode,
                                  bigvgan_vocoder_ckpt=model.bigvgan_16k_path,
                                  need_vae_encoder=False).to(device, dtype).eval()

    return net, feature_utils, seq_cfg
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
net, feature_utils, seq_cfg = get_model()
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@torch.inference_mode()
def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str, seed: int, num_steps: int,
                   cfg_strength: float, duration: float):
    """Generate audio for *video* and mux it into a new mp4.

    Returns the path of the saved video with the generated soundtrack.
    """
    # seed >= 0 gives a reproducible sample; otherwise draw a fresh seed.
    rng = torch.Generator(device=device)
    if seed >= 0:
        rng.manual_seed(seed)
    else:
        rng.seed()
    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)

    video_info = load_video(video, duration)
    # The loaded clip may be shorter than requested — trust its actual length.
    duration = video_info.duration_sec
    clip_frames = video_info.clip_frames.unsqueeze(0)  # add batch dimension
    sync_frames = video_info.sync_frames.unsqueeze(0)
    seq_cfg.duration = duration
    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)

    audios = generate(clip_frames,
                      sync_frames, [prompt],
                      negative_text=[negative_prompt],
                      feature_utils=feature_utils,
                      net=net,
                      fm=fm,
                      rng=rng,
                      cfg_strength=cfg_strength)
    audio = audios.float().cpu()[0]

    output_dir.mkdir(exist_ok=True, parents=True)
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    video_save_path = output_dir / f'{stamp}.mp4'
    make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
    return video_save_path
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@torch.inference_mode()
def image_to_audio(image: gr.Image, prompt: str, negative_prompt: str, seed: int, num_steps: int,
                   cfg_strength: float, duration: float):
    """Generate audio conditioned on a still *image* and wrap it in an mp4.

    Returns the path of the saved (static-frame) video with audio.
    """
    # seed >= 0 gives a reproducible sample; otherwise draw a fresh seed.
    rng = torch.Generator(device=device)
    if seed >= 0:
        rng.manual_seed(seed)
    else:
        rng.seed()
    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)

    image_info = load_image(image)
    clip_frames = image_info.clip_frames.unsqueeze(0)  # add batch dimension
    sync_frames = image_info.sync_frames.unsqueeze(0)
    seq_cfg.duration = duration
    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)

    audios = generate(clip_frames,
                      sync_frames, [prompt],
                      negative_text=[negative_prompt],
                      feature_utils=feature_utils,
                      net=net,
                      fm=fm,
                      rng=rng,
                      cfg_strength=cfg_strength,
                      image_input=True)
    audio = audios.float().cpu()[0]

    output_dir.mkdir(exist_ok=True, parents=True)
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    video_save_path = output_dir / f'{stamp}.mp4'
    # Render the single image as a 1-fps "video" so make_video can mux the audio.
    video_info = VideoInfo.from_image_info(image_info, duration, fps=Fraction(1))
    make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
    return video_save_path
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@torch.inference_mode()
def text_to_audio(prompt: str, negative_prompt: str, seed: int, num_steps: int, cfg_strength: float,
                  duration: float):
    """Generate audio from text only (no visual conditioning).

    Returns the path of the saved FLAC file.
    """
    # seed >= 0 gives a reproducible sample; otherwise draw a fresh seed.
    rng = torch.Generator(device=device)
    if seed >= 0:
        rng.manual_seed(seed)
    else:
        rng.seed()
    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)

    # No video/image: both visual streams are absent.
    clip_frames = sync_frames = None
    seq_cfg.duration = duration
    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)

    audios = generate(clip_frames,
                      sync_frames, [prompt],
                      negative_text=[negative_prompt],
                      feature_utils=feature_utils,
                      net=net,
                      fm=fm,
                      rng=rng,
                      cfg_strength=cfg_strength)
    audio = audios.float().cpu()[0]

    output_dir.mkdir(exist_ok=True, parents=True)
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    audio_save_path = output_dir / f'{stamp}.flac'
    torchaudio.save(audio_save_path, audio, seq_cfg.sampling_rate)
    return audio_save_path
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
video_to_audio_tab = gr.Interface(
|
| 168 |
+
fn=video_to_audio,
|
| 169 |
+
description="""
|
| 170 |
+
Project page: <a href="https://hkchengrex.com/MMAudio/">https://hkchengrex.com/MMAudio/</a><br>
|
| 171 |
+
Code: <a href="https://github.com/hkchengrex/MMAudio">https://github.com/hkchengrex/MMAudio</a><br>
|
| 172 |
+
|
| 173 |
+
NOTE: It takes longer to process high-resolution videos (>384 px on the shorter side).
|
| 174 |
+
Doing so does not improve results.
|
| 175 |
+
""",
|
| 176 |
+
inputs=[
|
| 177 |
+
gr.Video(),
|
| 178 |
+
gr.Text(label='Prompt'),
|
| 179 |
+
gr.Text(label='Negative prompt', value='music'),
|
| 180 |
+
gr.Number(label='Seed (-1: random)', value=-1, precision=0, minimum=-1),
|
| 181 |
+
gr.Number(label='Num steps', value=25, precision=0, minimum=1),
|
| 182 |
+
gr.Number(label='Guidance Strength', value=4.5, minimum=1),
|
| 183 |
+
gr.Number(label='Duration (sec)', value=8, minimum=1),
|
| 184 |
+
],
|
| 185 |
+
outputs='playable_video',
|
| 186 |
+
cache_examples=False,
|
| 187 |
+
title='MMAudio — Video-to-Audio Synthesis',
|
| 188 |
+
examples=[
|
| 189 |
+
[
|
| 190 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_beach.mp4',
|
| 191 |
+
'waves, seagulls',
|
| 192 |
+
'',
|
| 193 |
+
0,
|
| 194 |
+
25,
|
| 195 |
+
4.5,
|
| 196 |
+
10,
|
| 197 |
+
],
|
| 198 |
+
[
|
| 199 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_serpent.mp4',
|
| 200 |
+
'',
|
| 201 |
+
'music',
|
| 202 |
+
0,
|
| 203 |
+
25,
|
| 204 |
+
4.5,
|
| 205 |
+
10,
|
| 206 |
+
],
|
| 207 |
+
[
|
| 208 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_seahorse.mp4',
|
| 209 |
+
'bubbles',
|
| 210 |
+
'',
|
| 211 |
+
0,
|
| 212 |
+
25,
|
| 213 |
+
4.5,
|
| 214 |
+
10,
|
| 215 |
+
],
|
| 216 |
+
[
|
| 217 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_india.mp4',
|
| 218 |
+
'Indian holy music',
|
| 219 |
+
'',
|
| 220 |
+
0,
|
| 221 |
+
25,
|
| 222 |
+
4.5,
|
| 223 |
+
10,
|
| 224 |
+
],
|
| 225 |
+
[
|
| 226 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_galloping.mp4',
|
| 227 |
+
'galloping',
|
| 228 |
+
'',
|
| 229 |
+
0,
|
| 230 |
+
25,
|
| 231 |
+
4.5,
|
| 232 |
+
10,
|
| 233 |
+
],
|
| 234 |
+
[
|
| 235 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_kraken.mp4',
|
| 236 |
+
'waves, storm',
|
| 237 |
+
'',
|
| 238 |
+
0,
|
| 239 |
+
25,
|
| 240 |
+
4.5,
|
| 241 |
+
10,
|
| 242 |
+
],
|
| 243 |
+
[
|
| 244 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/mochi_storm.mp4',
|
| 245 |
+
'storm',
|
| 246 |
+
'',
|
| 247 |
+
0,
|
| 248 |
+
25,
|
| 249 |
+
4.5,
|
| 250 |
+
10,
|
| 251 |
+
],
|
| 252 |
+
[
|
| 253 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/hunyuan_spring.mp4',
|
| 254 |
+
'',
|
| 255 |
+
'',
|
| 256 |
+
0,
|
| 257 |
+
25,
|
| 258 |
+
4.5,
|
| 259 |
+
10,
|
| 260 |
+
],
|
| 261 |
+
[
|
| 262 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/hunyuan_typing.mp4',
|
| 263 |
+
'typing',
|
| 264 |
+
'',
|
| 265 |
+
0,
|
| 266 |
+
25,
|
| 267 |
+
4.5,
|
| 268 |
+
10,
|
| 269 |
+
],
|
| 270 |
+
[
|
| 271 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/hunyuan_wake_up.mp4',
|
| 272 |
+
'',
|
| 273 |
+
'',
|
| 274 |
+
0,
|
| 275 |
+
25,
|
| 276 |
+
4.5,
|
| 277 |
+
10,
|
| 278 |
+
],
|
| 279 |
+
[
|
| 280 |
+
'https://huggingface.co/hkchengrex/MMAudio/resolve/main/examples/sora_nyc.mp4',
|
| 281 |
+
'',
|
| 282 |
+
'',
|
| 283 |
+
0,
|
| 284 |
+
25,
|
| 285 |
+
4.5,
|
| 286 |
+
10,
|
| 287 |
+
],
|
| 288 |
+
])
|
| 289 |
+
|
| 290 |
+
text_to_audio_tab = gr.Interface(
|
| 291 |
+
fn=text_to_audio,
|
| 292 |
+
description="""
|
| 293 |
+
Project page: <a href="https://hkchengrex.com/MMAudio/">https://hkchengrex.com/MMAudio/</a><br>
|
| 294 |
+
Code: <a href="https://github.com/hkchengrex/MMAudio">https://github.com/hkchengrex/MMAudio</a><br>
|
| 295 |
+
""",
|
| 296 |
+
inputs=[
|
| 297 |
+
gr.Text(label='Prompt'),
|
| 298 |
+
gr.Text(label='Negative prompt'),
|
| 299 |
+
gr.Number(label='Seed (-1: random)', value=-1, precision=0, minimum=-1),
|
| 300 |
+
gr.Number(label='Num steps', value=25, precision=0, minimum=1),
|
| 301 |
+
gr.Number(label='Guidance Strength', value=4.5, minimum=1),
|
| 302 |
+
gr.Number(label='Duration (sec)', value=8, minimum=1),
|
| 303 |
+
],
|
| 304 |
+
outputs='audio',
|
| 305 |
+
cache_examples=False,
|
| 306 |
+
title='MMAudio — Text-to-Audio Synthesis',
|
| 307 |
+
)
|
| 308 |
+
|
| 309 |
+
image_to_audio_tab = gr.Interface(
|
| 310 |
+
fn=image_to_audio,
|
| 311 |
+
description="""
|
| 312 |
+
Project page: <a href="https://hkchengrex.com/MMAudio/">https://hkchengrex.com/MMAudio/</a><br>
|
| 313 |
+
Code: <a href="https://github.com/hkchengrex/MMAudio">https://github.com/hkchengrex/MMAudio</a><br>
|
| 314 |
+
|
| 315 |
+
NOTE: It takes longer to process high-resolution images (>384 px on the shorter side).
|
| 316 |
+
Doing so does not improve results.
|
| 317 |
+
""",
|
| 318 |
+
inputs=[
|
| 319 |
+
gr.Image(type='filepath'),
|
| 320 |
+
gr.Text(label='Prompt'),
|
| 321 |
+
gr.Text(label='Negative prompt'),
|
| 322 |
+
gr.Number(label='Seed (-1: random)', value=-1, precision=0, minimum=-1),
|
| 323 |
+
gr.Number(label='Num steps', value=25, precision=0, minimum=1),
|
| 324 |
+
gr.Number(label='Guidance Strength', value=4.5, minimum=1),
|
| 325 |
+
gr.Number(label='Duration (sec)', value=8, minimum=1),
|
| 326 |
+
],
|
| 327 |
+
outputs='playable_video',
|
| 328 |
+
cache_examples=False,
|
| 329 |
+
title='MMAudio — Image-to-Audio Synthesis (experimental)',
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
if __name__ == "__main__":
|
| 333 |
+
parser = ArgumentParser()
|
| 334 |
+
parser.add_argument('--port', type=int, default=7860)
|
| 335 |
+
args = parser.parse_args()
|
| 336 |
+
|
| 337 |
+
gr.TabbedInterface([video_to_audio_tab, text_to_audio_tab, image_to_audio_tab],
|
| 338 |
+
['Video-to-Audio', 'Text-to-Audio', 'Image-to-Audio (experimental)']).launch(
|
| 339 |
+
server_port=args.port, allowed_paths=[output_dir])
|
mmaudio/runner.py
ADDED
|
@@ -0,0 +1,938 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
trainer.py - wrapper and utility functions for network training
|
| 3 |
+
Compute loss, back-prop, update parameters, logging, etc.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Optional, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.distributed
|
| 11 |
+
import torch.optim as optim
|
| 12 |
+
#from av_bench.evaluate import evaluate # todo Jan 12
|
| 13 |
+
#from av_bench.extract import extract
|
| 14 |
+
from nitrous_ema import PostHocEMA
|
| 15 |
+
from omegaconf import DictConfig
|
| 16 |
+
from torch.nn.parallel import DistributedDataParallel as DDP
|
| 17 |
+
|
| 18 |
+
from mmaudio.model.flow_matching import FlowMatching
|
| 19 |
+
#from mmaudio.model.networks import get_my_mmaudio
|
| 20 |
+
from mmaudio.model.networks_new import get_my_mmaudio
|
| 21 |
+
from mmaudio.model.sequence_config import CONFIG_16K, CONFIG_44K
|
| 22 |
+
from mmaudio.model.utils.features_utils import FeaturesUtils
|
| 23 |
+
from mmaudio.model.utils.parameter_groups import get_parameter_groups
|
| 24 |
+
from mmaudio.model.utils.sample_utils import log_normal_sample
|
| 25 |
+
from mmaudio.utils.dist_utils import (info_if_rank_zero, local_rank, string_if_rank_zero)
|
| 26 |
+
from mmaudio.utils.log_integrator import Integrator
|
| 27 |
+
from mmaudio.utils.logger import TensorboardLogger
|
| 28 |
+
from mmaudio.utils.time_estimator import PartialTimeEstimator, TimeEstimator
|
| 29 |
+
from mmaudio.utils.video_joiner import VideoJoiner
|
| 30 |
+
|
| 31 |
+
import torch.nn.functional as F
|
| 32 |
+
import copy # todo Mar 2
|
| 33 |
+
|
| 34 |
+
class Runner:
|
| 35 |
+
|
| 36 |
+
def __init__(self,
|
| 37 |
+
cfg: DictConfig,
|
| 38 |
+
log: TensorboardLogger,
|
| 39 |
+
run_path: Union[str, Path],
|
| 40 |
+
for_training: bool = True,
|
| 41 |
+
latent_mean: Optional[torch.Tensor] = None,
|
| 42 |
+
latent_std: Optional[torch.Tensor] = None,
|
| 43 |
+
dpo_train: bool = False):
|
| 44 |
+
self.exp_id = cfg.exp_id
|
| 45 |
+
self.use_amp = cfg.amp
|
| 46 |
+
self.for_training = for_training
|
| 47 |
+
self.cfg = cfg
|
| 48 |
+
self.dpo_train = dpo_train # todo Mar 2
|
| 49 |
+
|
| 50 |
+
if cfg.model.endswith('16k'):
|
| 51 |
+
self.seq_cfg = CONFIG_16K
|
| 52 |
+
mode = '16k'
|
| 53 |
+
elif cfg.model.endswith('44k'):
|
| 54 |
+
self.seq_cfg = CONFIG_44K
|
| 55 |
+
mode = '44k'
|
| 56 |
+
else:
|
| 57 |
+
raise ValueError(f'Unknown model: {cfg.model}')
|
| 58 |
+
|
| 59 |
+
self.sample_rate = self.seq_cfg.sampling_rate
|
| 60 |
+
self.duration_sec = self.seq_cfg.duration
|
| 61 |
+
|
| 62 |
+
# setting up the model
|
| 63 |
+
empty_string_feat = torch.load('./ext_weights/empty_string.pth', weights_only=True)[0]
|
| 64 |
+
|
| 65 |
+
self.network = DDP(get_my_mmaudio(cfg.model,
|
| 66 |
+
latent_mean=latent_mean,
|
| 67 |
+
latent_std=latent_std,
|
| 68 |
+
empty_string_feat=empty_string_feat).cuda(),
|
| 69 |
+
device_ids=[local_rank],
|
| 70 |
+
broadcast_buffers=False,
|
| 71 |
+
find_unused_parameters=True)
|
| 72 |
+
|
| 73 |
+
if dpo_train: #
|
| 74 |
+
self.beta_dpo = cfg.beta_dpo #10000 # todo 1000, 2000, 5000
|
| 75 |
+
map_location = 'cuda:%d' % local_rank
|
| 76 |
+
pretrained_ckpt = torch.load(cfg['pretrained_ckpt_for_dpo'], map_location={'cuda:0': map_location}, weights_only=True)
|
| 77 |
+
self.network.module.load_state_dict(pretrained_ckpt, strict=True)
|
| 78 |
+
info_if_rank_zero(log, 'Loading pretrained weights from the disk')
|
| 79 |
+
|
| 80 |
+
self.ref_network = copy.deepcopy(self.network)
|
| 81 |
+
self.ref_network.requires_grad_ = False
|
| 82 |
+
self.ref_network.eval()
|
| 83 |
+
for param in self.ref_network.parameters():
|
| 84 |
+
param.requires_grad = False
|
| 85 |
+
|
| 86 |
+
if cfg.compile:
|
| 87 |
+
# NOTE: though train_fn and val_fn are very similar
|
| 88 |
+
# (early on they are implemented as a single function)
|
| 89 |
+
# keeping them separate and compiling them separately are CRUCIAL for high performance
|
| 90 |
+
self.train_fn = torch.compile(self.train_fn)
|
| 91 |
+
self.val_fn = torch.compile(self.val_fn)
|
| 92 |
+
|
| 93 |
+
if cfg.compile and dpo_train: # todo Mar 2
|
| 94 |
+
self.train_fn_dpo = torch.compile(self.train_fn_dpo)
|
| 95 |
+
|
| 96 |
+
self.fm = FlowMatching(cfg.sampling.min_sigma,
|
| 97 |
+
inference_mode=cfg.sampling.method,
|
| 98 |
+
num_steps=cfg.sampling.num_steps)
|
| 99 |
+
|
| 100 |
+
# ema profile
|
| 101 |
+
if for_training and cfg.ema.enable and local_rank == 0:
|
| 102 |
+
self.ema = PostHocEMA(self.network.module,
|
| 103 |
+
sigma_rels=cfg.ema.sigma_rels,
|
| 104 |
+
update_every=cfg.ema.update_every,
|
| 105 |
+
checkpoint_every_num_steps=cfg.ema.checkpoint_every,
|
| 106 |
+
checkpoint_folder=cfg.ema.checkpoint_folder,
|
| 107 |
+
step_size_correction=True).cuda()
|
| 108 |
+
self.ema_start = cfg.ema.start
|
| 109 |
+
else:
|
| 110 |
+
self.ema = None
|
| 111 |
+
|
| 112 |
+
self.rng = torch.Generator(device='cuda')
|
| 113 |
+
self.rng.manual_seed(cfg['seed'] + local_rank)
|
| 114 |
+
|
| 115 |
+
# setting up feature extractors and VAEs
|
| 116 |
+
if mode == '16k':
|
| 117 |
+
self.features = FeaturesUtils(
|
| 118 |
+
tod_vae_ckpt=cfg['vae_16k_ckpt'],
|
| 119 |
+
bigvgan_vocoder_ckpt=cfg['bigvgan_vocoder_ckpt'],
|
| 120 |
+
synchformer_ckpt=cfg['synchformer_ckpt'],
|
| 121 |
+
enable_conditions=True,
|
| 122 |
+
mode=mode,
|
| 123 |
+
need_vae_encoder=False,
|
| 124 |
+
)
|
| 125 |
+
elif mode == '44k':
|
| 126 |
+
self.features = FeaturesUtils(
|
| 127 |
+
tod_vae_ckpt=cfg['vae_44k_ckpt'],
|
| 128 |
+
synchformer_ckpt=cfg['synchformer_ckpt'],
|
| 129 |
+
enable_conditions=True,
|
| 130 |
+
mode=mode,
|
| 131 |
+
need_vae_encoder=False,
|
| 132 |
+
)
|
| 133 |
+
self.features = self.features.cuda().eval()
|
| 134 |
+
|
| 135 |
+
if cfg.compile:
|
| 136 |
+
self.features.compile()
|
| 137 |
+
|
| 138 |
+
# hyperparameters
|
| 139 |
+
self.log_normal_sampling_mean = cfg.sampling.mean
|
| 140 |
+
self.log_normal_sampling_scale = cfg.sampling.scale
|
| 141 |
+
self.null_condition_probability = cfg.null_condition_probability
|
| 142 |
+
self.cfg_strength = cfg.cfg_strength
|
| 143 |
+
|
| 144 |
+
# todo add extra hyperparameters
|
| 145 |
+
self.text_condition_drop_probability = cfg.text_condition_drop_probability # todo Jan 16 [0.3, 0.5]
|
| 146 |
+
self.text_condition_drop_enable = cfg.text_condition_drop_enable # todo Jan 16 True or False
|
| 147 |
+
self.text_drop_step = cfg.text_drop_step # todo Jan 16
|
| 148 |
+
|
| 149 |
+
# setting up logging
|
| 150 |
+
self.log = log
|
| 151 |
+
self.run_path = Path(run_path)
|
| 152 |
+
vgg_cfg = cfg.dpo_data.VGGSound
|
| 153 |
+
if for_training:
|
| 154 |
+
self.val_video_joiner = VideoJoiner(vgg_cfg.root, self.run_path / 'val-sampled-videos',
|
| 155 |
+
self.sample_rate, self.duration_sec)
|
| 156 |
+
else:
|
| 157 |
+
self.test_video_joiner = VideoJoiner(vgg_cfg.root,
|
| 158 |
+
self.run_path / 'test-sampled-videos',
|
| 159 |
+
self.sample_rate, self.duration_sec)
|
| 160 |
+
string_if_rank_zero(self.log, 'model_size',
|
| 161 |
+
f'{sum([param.nelement() for param in self.network.parameters()])}')
|
| 162 |
+
string_if_rank_zero(
|
| 163 |
+
self.log, 'number_of_parameters_that_require_gradient: ',
|
| 164 |
+
str(
|
| 165 |
+
sum([
|
| 166 |
+
param.nelement()
|
| 167 |
+
for param in filter(lambda p: p.requires_grad, self.network.parameters())
|
| 168 |
+
])))
|
| 169 |
+
info_if_rank_zero(self.log, 'torch version: ' + torch.__version__)
|
| 170 |
+
self.train_integrator = Integrator(self.log, distributed=True)
|
| 171 |
+
self.val_integrator = Integrator(self.log, distributed=True)
|
| 172 |
+
|
| 173 |
+
# setting up optimizer and loss
|
| 174 |
+
if for_training:
|
| 175 |
+
self.enter_train()
|
| 176 |
+
parameter_groups = get_parameter_groups(self.network, cfg, print_log=(local_rank == 0))
|
| 177 |
+
self.optimizer = optim.AdamW(parameter_groups,
|
| 178 |
+
lr=cfg['learning_rate'],
|
| 179 |
+
weight_decay=cfg['weight_decay'],
|
| 180 |
+
betas=[0.9, 0.95],
|
| 181 |
+
eps=1e-6 if self.use_amp else 1e-8,
|
| 182 |
+
fused=True)
|
| 183 |
+
if self.use_amp:
|
| 184 |
+
self.scaler = torch.amp.GradScaler(init_scale=2048)
|
| 185 |
+
self.clip_grad_norm = cfg['clip_grad_norm']
|
| 186 |
+
|
| 187 |
+
# linearly warmup learning rate
|
| 188 |
+
linear_warmup_steps = cfg['linear_warmup_steps']
|
| 189 |
+
|
| 190 |
+
def warmup(currrent_step: int):
|
| 191 |
+
return (currrent_step + 1) / (linear_warmup_steps + 1)
|
| 192 |
+
|
| 193 |
+
warmup_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=warmup)
|
| 194 |
+
|
| 195 |
+
# setting up learning rate scheduler
|
| 196 |
+
if cfg['lr_schedule'] == 'constant':
|
| 197 |
+
next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda _: 1)
|
| 198 |
+
elif cfg['lr_schedule'] == 'poly':
|
| 199 |
+
total_num_iter = cfg['iterations']
|
| 200 |
+
next_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer,
|
| 201 |
+
lr_lambda=lambda x:
|
| 202 |
+
(1 - (x / total_num_iter))**0.9)
|
| 203 |
+
elif cfg['lr_schedule'] == 'step':
|
| 204 |
+
next_scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
|
| 205 |
+
cfg['lr_schedule_steps'],
|
| 206 |
+
cfg['lr_schedule_gamma'])
|
| 207 |
+
else:
|
| 208 |
+
raise NotImplementedError
|
| 209 |
+
|
| 210 |
+
self.scheduler = optim.lr_scheduler.SequentialLR(self.optimizer,
|
| 211 |
+
[warmup_scheduler, next_scheduler],
|
| 212 |
+
[linear_warmup_steps])
|
| 213 |
+
|
| 214 |
+
# Logging info
|
| 215 |
+
self.log_text_interval = cfg['log_text_interval']
|
| 216 |
+
self.log_extra_interval = cfg['log_extra_interval']
|
| 217 |
+
self.save_weights_interval = cfg['save_weights_interval']
|
| 218 |
+
self.save_checkpoint_interval = cfg['save_checkpoint_interval']
|
| 219 |
+
self.save_copy_iterations = cfg['save_copy_iterations']
|
| 220 |
+
self.num_iterations = cfg['num_iterations']
|
| 221 |
+
if cfg['debug']:
|
| 222 |
+
self.log_text_interval = self.log_extra_interval = 1
|
| 223 |
+
|
| 224 |
+
# update() is called when we log metrics, within the logger
|
| 225 |
+
self.log.batch_timer = TimeEstimator(self.num_iterations, self.log_text_interval)
|
| 226 |
+
# update() is called every iteration, in this script
|
| 227 |
+
self.log.data_timer = PartialTimeEstimator(self.num_iterations, 1, ema_alpha=0.9)
|
| 228 |
+
else:
|
| 229 |
+
self.enter_val()
|
| 230 |
+
|
| 231 |
+
    def train_fn(
        self,
        clip_f: torch.Tensor,
        sync_f: torch.Tensor,
        text_f: torch.Tensor,
        a_mean: torch.Tensor,
        a_std: torch.Tensor,
        it: int = 0,  # todo Jan 16
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """One flow-matching training forward pass.

        Samples an audio latent from (a_mean, a_std), draws a log-normal
        timestep, applies classifier-free condition dropout, and computes the
        flow-matching loss.

        NOTE: the order of draws from self.rng is load-bearing for
        reproducibility — do not reorder the sampling calls.

        Args:
            clip_f, sync_f, text_f: conditioning features (mutated in place by
                the dropout masks below).
            a_mean, a_std: per-element mean/std of the target audio latent.
            it: current iteration, used only for the scheduled text dropout.

        Returns:
            (normalized target latent x1, per-element loss, mean loss, t).
        """
        # sample — reparameterized draw of the target latent
        a_randn = torch.empty_like(a_mean).normal_(generator=self.rng)
        x1 = a_mean + a_std * a_randn
        bs = x1.shape[0]  # batch_size * seq_len * num_channels

        # normalize the latents
        x1 = self.network.module.normalize(x1)

        t = log_normal_sample(x1,
                              generator=self.rng,
                              m=self.log_normal_sampling_mean,
                              s=self.log_normal_sampling_scale)
        x0, x1, xt, (clip_f, sync_f, text_f) = self.fm.get_x0_xt_c(x1,
                                                                   t,
                                                                   Cs=[clip_f, sync_f, text_f],
                                                                   generator=self.rng)

        # classifier-free training
        samples = torch.rand(bs, device=x1.device, generator=self.rng)

        # null mask is for when a video is provided but we decided to ignore it
        null_video = (samples < self.null_condition_probability)
        # complete mask is for when a video is not provided or we decided to ignore it
        clip_f[null_video] = self.network.module.empty_clip_feat
        sync_f[null_video] = self.network.module.empty_sync_feat

        # Fresh draw so text dropout is independent of video dropout.
        samples = torch.rand(bs, device=x1.device, generator=self.rng)
        # todo Jan 16: add text drop prob schedule
        if self.text_condition_drop_enable:  # scheduled dropout: probability steps every text_drop_step iterations
            drop_index = int(it // self.text_drop_step)
            if drop_index >= len(self.text_condition_drop_probability):  # past the schedule: hold the last value
                text_condition_drop_prob = self.text_condition_drop_probability[-1]

            else:
                text_condition_drop_prob = self.text_condition_drop_probability[drop_index]

            if it % self.text_drop_step == 0:
                info_if_rank_zero(self.log, f'Text Condition Drop Prob is: {text_condition_drop_prob}')

            null_text = (samples < text_condition_drop_prob)
        else:
            # fixed dropout probability, same as for video
            null_text = (samples < self.null_condition_probability)  # todo Jan 16 [0.1, 0.2, 0.3, 0.4, 0.5]

        #null_text = (samples < self.null_condition_probability) todo Jan 16 comment it when to use dynamic text condition drop
        text_f[null_text] = self.network.module.empty_string_feat

        pred_v = self.network(xt, clip_f, sync_f, text_f, t)
        loss = self.fm.loss(pred_v, x0, x1)
        mean_loss = loss.mean()
        return x1, loss, mean_loss, t
|
| 290 |
+
|
| 291 |
+
def train_fn_dpo(
|
| 292 |
+
self,
|
| 293 |
+
clip_f: torch.Tensor,
|
| 294 |
+
sync_f: torch.Tensor,
|
| 295 |
+
text_f: torch.Tensor,
|
| 296 |
+
chosen_a_mean: torch.Tensor,
|
| 297 |
+
chosen_a_std: torch.Tensor,
|
| 298 |
+
reject_a_mean: torch.Tensor,
|
| 299 |
+
reject_a_std: torch.Tensor,
|
| 300 |
+
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 301 |
+
# sample
|
| 302 |
+
|
| 303 |
+
a_randn_chosen = torch.empty_like(chosen_a_mean).normal_(generator=self.rng)
|
| 304 |
+
x1_chosen = chosen_a_mean + chosen_a_std * a_randn_chosen
|
| 305 |
+
|
| 306 |
+
a_randn_reject = torch.empty_like(reject_a_mean).normal_(generator=self.rng)
|
| 307 |
+
x1_reject = reject_a_mean + reject_a_std * a_randn_reject
|
| 308 |
+
|
| 309 |
+
# normalize the latents
|
| 310 |
+
x1_chosen = self.network.module.normalize(x1_chosen)
|
| 311 |
+
x1_reject = self.network.module.normalize(x1_reject) # todo Mar 2 (needs to change)
|
| 312 |
+
|
| 313 |
+
x1 = torch.cat((x1_chosen, x1_reject), dim=0)
|
| 314 |
+
bs = x1.shape[0] # batch_size * seq_len * num_channels * 2 # todo Mar 2 needs to change
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
t = log_normal_sample(x1.chunk(2)[0], # todo keep the t same for both chosen and reject
|
| 318 |
+
generator=self.rng,
|
| 319 |
+
m=self.log_normal_sampling_mean,
|
| 320 |
+
s=self.log_normal_sampling_scale)
|
| 321 |
+
|
| 322 |
+
t = t.repeat(2) # todo
|
| 323 |
+
|
| 324 |
+
x0, x1, xt, (clip_f, sync_f, text_f) = self.fm.get_x0_xt_c_dpo(x1,
|
| 325 |
+
t,
|
| 326 |
+
Cs=[clip_f, sync_f, text_f],
|
| 327 |
+
generator=self.rng)
|
| 328 |
+
|
| 329 |
+
# classifier-free training
|
| 330 |
+
samples = torch.rand(bs//2, device=x1.device, generator=self.rng)
|
| 331 |
+
|
| 332 |
+
# null mask is for when a video is provided but we decided to ignore it
|
| 333 |
+
null_video = (samples < self.null_condition_probability)
|
| 334 |
+
# complete mask is for when a video is not provided or we decided to ignore it
|
| 335 |
+
clip_f[null_video] = self.network.module.empty_clip_feat
|
| 336 |
+
sync_f[null_video] = self.network.module.empty_sync_feat
|
| 337 |
+
|
| 338 |
+
samples = torch.rand(bs//2, device=x1.device, generator=self.rng)
|
| 339 |
+
|
| 340 |
+
# todo Jan 16: add text drop prob schedule
|
| 341 |
+
if self.text_condition_drop_enable: # todo Jan 16
|
| 342 |
+
drop_index = int(it // self.text_drop_step) # todo Jan 16
|
| 343 |
+
if drop_index >= len(self.text_condition_drop_probability): # todo Jan 16
|
| 344 |
+
text_condition_drop_prob = self.text_condition_drop_probability[-1] # todo Jan 16
|
| 345 |
+
|
| 346 |
+
else:
|
| 347 |
+
text_condition_drop_prob = self.text_condition_drop_probability[drop_index] # todo Jan 16
|
| 348 |
+
|
| 349 |
+
if it % self.text_drop_step == 0:
|
| 350 |
+
info_if_rank_zero(self.log, f'Text Condition Drop Prob is: {text_condition_drop_prob}') # todo Jan 16
|
| 351 |
+
|
| 352 |
+
null_text = (samples < text_condition_drop_prob) # todo Jan 16
|
| 353 |
+
else:
|
| 354 |
+
null_text = (samples < self.null_condition_probability) # todo Jan 16 [0.1, 0.2, 0.3, 0.4, 0.5]
|
| 355 |
+
|
| 356 |
+
#null_text = (samples < self.null_condition_probability) todo Jan 16 comment it when to use dynamic text condition drop
|
| 357 |
+
text_f[null_text] = self.network.module.empty_string_feat
|
| 358 |
+
|
| 359 |
+
clip_f = clip_f.repeat(2, 1, 1)
|
| 360 |
+
sync_f = sync_f.repeat(2, 1, 1)
|
| 361 |
+
text_f = text_f.repeat(2, 1, 1)
|
| 362 |
+
|
| 363 |
+
pred_v = self.network(xt, clip_f, sync_f, text_f, t)
|
| 364 |
+
|
| 365 |
+
model_loss = self.fm.loss(pred_v, x0, x1)
|
| 366 |
+
|
| 367 |
+
model_losses_w, model_losses_l = model_loss.chunk(2)
|
| 368 |
+
|
| 369 |
+
model_diff = model_losses_w - model_losses_l
|
| 370 |
+
raw_model_loss = 0.5 * (model_losses_w.mean() + model_losses_l.mean())
|
| 371 |
+
|
| 372 |
+
with torch.no_grad():
|
| 373 |
+
ref_pred_v = self.ref_network(xt, clip_f, sync_f, text_f, t)
|
| 374 |
+
ref_loss = self.fm.loss(ref_pred_v, x0, x1)
|
| 375 |
+
ref_losses_w, ref_losses_l = ref_loss.chunk(2)
|
| 376 |
+
ref_diff = ref_losses_w - ref_losses_l
|
| 377 |
+
raw_ref_loss = ref_loss.mean()
|
| 378 |
+
|
| 379 |
+
scale_term = -0.5 * self.beta_dpo
|
| 380 |
+
inside_term = scale_term * (model_diff - ref_diff)
|
| 381 |
+
implicit_acc = (
|
| 382 |
+
scale_term * (model_diff - ref_diff) > 0
|
| 383 |
+
).sum().float() / inside_term.size(0)
|
| 384 |
+
|
| 385 |
+
loss = -1 * F.logsigmoid(inside_term) + model_losses_w
|
| 386 |
+
mean_loss = -1 * F.logsigmoid(inside_term).mean() + model_losses_w.mean()
|
| 387 |
+
|
| 388 |
+
return x1, loss, mean_loss, t.chunk(2)[0], raw_model_loss, raw_ref_loss, implicit_acc
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def val_fn(
    self,
    clip_f: torch.Tensor,
    sync_f: torch.Tensor,
    text_f: torch.Tensor,
    x1: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Compute the flow-matching validation loss for one batch.

    Mirrors the training objective: samples a timestep t, forms the noisy
    point xt, applies the same random classifier-free condition dropout,
    and returns (per-element loss, mean loss, t).
    NOTE(review): annotated to return 4 tensors but returns 3 — kept as-is.
    """
    batch_size = x1.shape[0]  # x1: batch_size * seq_len * num_channels
    # normalize the latents before anything else
    x1 = self.network.module.normalize(x1)

    # log-normal timestep sampling, identical to training
    t = log_normal_sample(x1,
                          generator=self.rng,
                          m=self.log_normal_sampling_mean,
                          s=self.log_normal_sampling_scale)
    x0, x1, xt, (clip_f, sync_f, text_f) = self.fm.get_x0_xt_c(x1,
                                                               t,
                                                               Cs=[clip_f, sync_f, text_f],
                                                               generator=self.rng)

    # classifier-free training: randomly blank the video conditions...
    video_draw = torch.rand(batch_size, device=x1.device, generator=self.rng)
    dropped_video = video_draw < self.null_condition_probability
    clip_f[dropped_video] = self.network.module.empty_clip_feat
    sync_f[dropped_video] = self.network.module.empty_sync_feat

    # ...and, with an independent draw, the text condition
    text_draw = torch.rand(batch_size, device=x1.device, generator=self.rng)
    dropped_text = text_draw < self.null_condition_probability
    text_f[dropped_text] = self.network.module.empty_string_feat

    prediction = self.network(xt, clip_f, sync_f, text_f, t)

    loss = self.fm.loss(prediction, x0, x1)
    return loss, loss.mean(), t
|
| 427 |
+
|
| 428 |
+
def train_pass(self, data, it: int = 0):
    """Run one full training step.

    Forward + flow-matching loss under bfloat16 autocast, backward with
    optional AMP scaling and gradient clipping, EMA/scheduler updates,
    periodic audio-sample logging, and periodic weight/checkpoint saving.

    Args:
        data: batch dict with 'clip_features', 'sync_features',
            'text_features', 'video_exist', 'text_exist', and the latent
            Gaussian stats 'a_mean'/'a_std'.
        it: global iteration index; drives all `% interval` side effects.

    Raises:
        ValueError: if called on a runner not configured for training.
    """
    if not self.for_training:
        raise ValueError('train_pass() should not be called when not training.')

    self.enter_train()
    with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16):
        clip_f = data['clip_features'].cuda(non_blocking=True)
        sync_f = data['sync_features'].cuda(non_blocking=True)
        text_f = data['text_features'].cuda(non_blocking=True)
        video_exist = data['video_exist'].cuda(non_blocking=True)
        text_exist = data['text_exist'].cuda(non_blocking=True)
        a_mean = data['a_mean'].cuda(non_blocking=True)
        a_std = data['a_std'].cuda(non_blocking=True)

        # these masks are for non-existent data; masking for CFG training is in train_fn
        clip_f[~video_exist] = self.network.module.empty_clip_feat
        sync_f[~video_exist] = self.network.module.empty_sync_feat
        text_f[~text_exist] = self.network.module.empty_string_feat

        self.log.data_timer.end()
        if it % self.log_extra_interval == 0:
            # keep pre-CFG-dropout conditions for the sampling demo below
            unmasked_clip_f = clip_f.clone()
            unmasked_sync_f = sync_f.clone()
            unmasked_text_f = text_f.clone()
        x1, loss, mean_loss, t = self.train_fn(clip_f, sync_f, text_f, a_mean, a_std, it)

        self.train_integrator.add_dict({'loss': mean_loss})

    if it % self.log_text_interval == 0 and it != 0:
        self.train_integrator.add_scalar('lr', self.scheduler.get_last_lr()[0])
        self.train_integrator.add_binned_tensor('binned_loss', loss, t)
        self.train_integrator.finalize('train', it)
        self.train_integrator.reset_except_hooks()

    # Backward pass (outside autocast)
    self.optimizer.zero_grad(set_to_none=True)
    if self.use_amp:
        self.scaler.scale(mean_loss).backward()
        # unscale before clipping so the threshold applies to true gradients
        self.scaler.unscale_(self.optimizer)
        grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                   self.clip_grad_norm)
        self.scaler.step(self.optimizer)
        self.scaler.update()
    else:
        mean_loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                   self.clip_grad_norm)
        self.optimizer.step()

    if self.ema is not None and it >= self.ema_start:
        self.ema.update()
    self.scheduler.step()
    self.integrator.add_scalar('grad_norm', grad_norm)

    # Periodic qualitative logging: decode/vocode GT and a CFG-sampled clip.
    self.enter_val()
    with torch.amp.autocast('cuda', enabled=self.use_amp,
                            dtype=torch.bfloat16), torch.inference_mode():
        try:
            if it % self.log_extra_interval == 0:
                # save GT audio; unnormalize the latents first
                x1 = self.network.module.unnormalize(x1[0:1])
                mel = self.features.decode(x1)
                audio = self.features.vocode(mel).cpu()[0]  # 1 * num_samples
                self.log.log_spectrogram('train', f'spec-gt-r{local_rank}', mel.cpu()[0], it)
                self.log.log_audio('train',
                                   f'audio-gt-r{local_rank}',
                                   audio,
                                   it,
                                   sample_rate=self.sample_rate)

                # save audio from sampling the flow ODE with CFG
                x0 = torch.empty_like(x1[0:1]).normal_(generator=self.rng)
                clip_f = unmasked_clip_f[0:1]
                sync_f = unmasked_sync_f[0:1]
                text_f = unmasked_text_f[0:1]
                conditions = self.network.module.preprocess_conditions(clip_f, sync_f, text_f)
                empty_conditions = self.network.module.get_empty_conditions(x0.shape[0])
                cfg_ode_wrapper = lambda t, x: self.network.module.ode_wrapper(
                    t, x, conditions, empty_conditions, self.cfg_strength)
                x1_hat = self.fm.to_data(cfg_ode_wrapper, x0)
                x1_hat = self.network.module.unnormalize(x1_hat)
                mel = self.features.decode(x1_hat)
                audio = self.features.vocode(mel).cpu()[0]
                self.log.log_spectrogram('train', f'spec-r{local_rank}', mel.cpu()[0], it)
                self.log.log_audio('train',
                                   f'audio-r{local_rank}',
                                   audio,
                                   it,
                                   sample_rate=self.sample_rate)
        except Exception as e:
            # best-effort logging: never kill training unless debugging
            self.log.warning(f'Error in extra logging: {e}')
            if self.cfg.debug:
                raise

    # Save network weights and checkpoint if needed
    save_copy = it in self.save_copy_iterations

    if (it % self.save_weights_interval == 0 and it != 0) or save_copy:
        # BUGFIX: forward save_copy so iterations listed in
        # save_copy_iterations actually get an iteration-tagged weight file
        # (previously only save_checkpoint received the flag).
        self.save_weights(it, save_copy=save_copy)

    if it % self.save_checkpoint_interval == 0 and it != 0:
        self.save_checkpoint(it, save_copy=save_copy)

    self.log.data_timer.start()
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def train_dpo_pass(self, data, it: int = 0):
    """Run one Diffusion-DPO training step on (chosen, rejected) latent pairs.

    Same overall structure as train_pass, but the loss comes from
    train_fn_dpo, which scores the policy against a frozen reference
    network on preference pairs; extra scalars (raw model/ref loss,
    implicit accuracy) are logged.

    Args:
        data: batch dict with shared conditions plus per-preference latent
            stats 'chosen_a_mean'/'chosen_a_std' and
            'reject_a_mean'/'reject_a_std'.
        it: global iteration index; drives all `% interval` side effects.

    Raises:
        ValueError: if called on a runner not configured for training.
    """
    if not self.for_training:
        # BUGFIX: error message previously named train_pass().
        raise ValueError('train_dpo_pass() should not be called when not training.')

    self.enter_train()
    with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16):
        clip_f = data['clip_features'].cuda(non_blocking=True)
        sync_f = data['sync_features'].cuda(non_blocking=True)
        text_f = data['text_features'].cuda(non_blocking=True)
        video_exist = data['video_exist'].cuda(non_blocking=True)
        text_exist = data['text_exist'].cuda(non_blocking=True)
        chosen_a_mean = data['chosen_a_mean'].cuda(non_blocking=True)
        chosen_a_std = data['chosen_a_std'].cuda(non_blocking=True)

        reject_a_mean = data['reject_a_mean'].cuda(non_blocking=True)
        reject_a_std = data['reject_a_std'].cuda(non_blocking=True)

        # these masks are for non-existent data; masking for CFG training is in train_fn
        clip_f[~video_exist] = self.network.module.empty_clip_feat
        sync_f[~video_exist] = self.network.module.empty_sync_feat
        text_f[~text_exist] = self.network.module.empty_string_feat

        self.log.data_timer.end()
        if it % self.log_extra_interval == 0:
            # keep pre-dropout conditions for the sampling demo below
            unmasked_clip_f = clip_f.clone()
            unmasked_sync_f = sync_f.clone()
            unmasked_text_f = text_f.clone()
        x1, loss, mean_loss, t, raw_model_loss, raw_ref_loss, implicit_acc = self.train_fn_dpo(
            clip_f, sync_f, text_f, chosen_a_mean, chosen_a_std, reject_a_mean, reject_a_std)

        self.train_integrator.add_dict({'loss': mean_loss})
        self.train_integrator.add_dict({'raw_model_loss': raw_model_loss})
        self.train_integrator.add_dict({'raw_ref_loss': raw_ref_loss})
        self.train_integrator.add_dict({'implicit_acc': implicit_acc})

    if it % self.log_text_interval == 0 and it != 0:
        self.train_integrator.add_scalar('lr', self.scheduler.get_last_lr()[0])
        self.train_integrator.add_binned_tensor('binned_loss', loss, t)
        self.train_integrator.finalize('train', it)
        self.train_integrator.reset_except_hooks()

    # Backward pass (outside autocast)
    self.optimizer.zero_grad(set_to_none=True)
    if self.use_amp:
        self.scaler.scale(mean_loss).backward()
        self.scaler.unscale_(self.optimizer)
        grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                   self.clip_grad_norm)
        self.scaler.step(self.optimizer)
        self.scaler.update()
    else:
        mean_loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                   self.clip_grad_norm)
        self.optimizer.step()

    if self.ema is not None and it >= self.ema_start:
        self.ema.update()
    self.scheduler.step()
    self.integrator.add_scalar('grad_norm', grad_norm)

    # Periodic qualitative logging for both halves of the preference pair.
    self.enter_val()
    with torch.amp.autocast('cuda', enabled=self.use_amp,
                            dtype=torch.bfloat16), torch.inference_mode():
        try:
            if it % self.log_extra_interval == 0:
                # x1 stacks [chosen; rejected] along the batch dimension
                x1_chosen, x1_reject = x1.chunk(2)

                # save GT audio for sample 0 of each half.
                # BUGFIX: was `x1_chosen, x1_rekject = ...` (typo), so
                # x1_reject stayed un-sliced and un-unnormalized and the
                # "reject" spectrogram below was decoded from the wrong tensor.
                x1_chosen = self.network.module.unnormalize(x1_chosen[0:1])
                x1_reject = self.network.module.unnormalize(x1_reject[0:1])
                mel_chosen = self.features.decode(x1_chosen)
                audio_chosen = self.features.vocode(mel_chosen).cpu()[0]  # 1 * num_samples
                mel_reject = self.features.decode(x1_reject)
                audio_reject = self.features.vocode(mel_reject).cpu()[0]  # 1 * num_samples

                self.log.log_spectrogram('train-chosen', f'spec-gt-chosen-r{local_rank}',
                                         mel_chosen.cpu()[0], it)
                self.log.log_audio('train-chosen',
                                   f'audio-gt-chosen-r{local_rank}',
                                   audio_chosen,
                                   it,
                                   sample_rate=self.sample_rate)

                self.log.log_spectrogram('train-reject', f'spec-gt-reject-r{local_rank}',
                                         mel_reject.cpu()[0], it)
                self.log.log_audio('train-reject',
                                   f'audio-gt-reject-r{local_rank}',
                                   audio_reject,
                                   it,
                                   sample_rate=self.sample_rate)

                # save audio from sampling: two independent noise draws,
                # same conditions (sample 0)
                x0_chosen = torch.empty_like(x1_chosen[0:1]).normal_(generator=self.rng)
                x0_reject = torch.empty_like(x1_reject[0:1]).normal_(generator=self.rng)
                clip_f = unmasked_clip_f[0:1]
                sync_f = unmasked_sync_f[0:1]
                text_f = unmasked_text_f[0:1]
                conditions = self.network.module.preprocess_conditions(clip_f, sync_f, text_f)
                empty_conditions = self.network.module.get_empty_conditions(x0_chosen.shape[0])
                cfg_ode_wrapper = lambda t, x: self.network.module.ode_wrapper(
                    t, x, conditions, empty_conditions, self.cfg_strength)
                x1_hat_chosen = self.fm.to_data(cfg_ode_wrapper, x0_chosen)
                x1_hat_reject = self.fm.to_data(cfg_ode_wrapper, x0_reject)

                x1_hat_chosen = self.network.module.unnormalize(x1_hat_chosen)
                x1_hat_reject = self.network.module.unnormalize(x1_hat_reject)
                mel_chosen = self.features.decode(x1_hat_chosen)
                audio_chosen = self.features.vocode(mel_chosen).cpu()[0]

                mel_reject = self.features.decode(x1_hat_reject)
                audio_reject = self.features.vocode(mel_reject).cpu()[0]

                self.log.log_spectrogram('train-chosen', f'spec-chosen-r{local_rank}',
                                         mel_chosen.cpu()[0], it)
                self.log.log_audio('train-chosen',
                                   f'audio-chosen-r{local_rank}',
                                   audio_chosen,
                                   it,
                                   sample_rate=self.sample_rate)

                self.log.log_spectrogram('train-reject', f'spec-reject-r{local_rank}',
                                         mel_reject.cpu()[0], it)
                self.log.log_audio('train-reject',
                                   f'audio-reject-r{local_rank}',
                                   audio_reject,
                                   it,
                                   sample_rate=self.sample_rate)
        except Exception as e:
            self.log.warning(f'Error in extra logging: {e}')
            if self.cfg.debug:
                raise

    # Save network weights and checkpoint if needed
    save_copy = it in self.save_copy_iterations

    if (it % self.save_weights_interval == 0 and it != 0) or save_copy:
        # BUGFIX: forward save_copy (see train_pass for rationale).
        self.save_weights(it, save_copy=save_copy)

    if it % self.save_checkpoint_interval == 0 and it != 0:
        self.save_checkpoint(it, save_copy=save_copy)

    self.log.data_timer.start()
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
@torch.inference_mode()
def validation_pass(self, data, it: int = 0):
    """Score one validation batch and accumulate losses in the val integrator."""
    self.enter_val()
    with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16):
        fetch = lambda key: data[key].cuda(non_blocking=True)
        clip_f = fetch('clip_features')
        sync_f = fetch('sync_features')
        text_f = fetch('text_features')
        video_exist = fetch('video_exist')
        text_exist = fetch('text_exist')
        a_mean = fetch('a_mean')
        a_std = fetch('a_std')

        # blank out conditions for samples that genuinely lack video/text
        clip_f[~video_exist] = self.network.module.empty_clip_feat
        sync_f[~video_exist] = self.network.module.empty_sync_feat
        text_f[~text_exist] = self.network.module.empty_string_feat

        # draw a latent from the per-sample Gaussian (mean + std * noise)
        noise = torch.empty_like(a_mean).normal_(generator=self.rng)
        x1 = a_mean + a_std * noise

        self.log.data_timer.end()
        loss, mean_loss, t = self.val_fn(clip_f.clone(), sync_f.clone(), text_f.clone(), x1)

        self.val_integrator.add_binned_tensor('binned_loss', loss, t)
        self.val_integrator.add_dict({'loss': mean_loss})

    self.log.data_timer.start()
|
| 738 |
+
|
| 739 |
+
@torch.inference_mode()
def inference_pass(self,
                   data,
                   it: int,
                   data_cfg: DictConfig,
                   *,
                   save_eval: bool = True) -> Path:
    """Generate audio for one batch by integrating the flow ODE with CFG.

    Args:
        data: batch dict with 'clip_features', 'sync_features',
            'text_features', 'video_exist', 'text_exist', 'a_mean'
            (used for the latent shape only), and per-sample 'id'.
        it: global iteration; names the output directory when save_eval.
        data_cfg: dataset config. If `output_subdir` is set, outputs are
            treated as validation; otherwise they go to '{tag}-sampled'.
        save_eval: when False, outputs land in a reusable 'val-cache'
            directory instead of an iteration-numbered one.

    Returns:
        Path of the directory the audio files were written to.
        NOTE(review): assumes audio.shape[0] >= 1 so `audio_dir` is bound
        before the return — confirm dataloader never yields empty batches.
    """
    self.enter_val()
    with torch.amp.autocast('cuda', enabled=self.use_amp, dtype=torch.bfloat16):
        clip_f = data['clip_features'].cuda(non_blocking=True)
        sync_f = data['sync_features'].cuda(non_blocking=True)
        text_f = data['text_features'].cuda(non_blocking=True)
        video_exist = data['video_exist'].cuda(non_blocking=True)
        text_exist = data['text_exist'].cuda(non_blocking=True)
        a_mean = data['a_mean'].cuda(non_blocking=True)  # for the shape only

        # blank out conditions for samples without real video/text
        clip_f[~video_exist] = self.network.module.empty_clip_feat
        sync_f[~video_exist] = self.network.module.empty_sync_feat
        text_f[~text_exist] = self.network.module.empty_string_feat

        # sample: start from Gaussian noise and integrate to data with CFG
        x0 = torch.empty_like(a_mean).normal_(generator=self.rng)
        conditions = self.network.module.preprocess_conditions(clip_f, sync_f, text_f)
        empty_conditions = self.network.module.get_empty_conditions(x0.shape[0])
        cfg_ode_wrapper = lambda t, x: self.network.module.ode_wrapper(
            t, x, conditions, empty_conditions, self.cfg_strength)
        x1_hat = self.fm.to_data(cfg_ode_wrapper, x0)
        x1_hat = self.network.module.unnormalize(x1_hat)
        mel = self.features.decode(x1_hat)
        audio = self.features.vocode(mel).cpu()
        for i in range(audio.shape[0]):
            video_id = data['id'][i]
            if (not self.for_training) and i == 0:
                # save very few videos
                self.test_video_joiner.join(video_id, f'{video_id}', audio[i].transpose(0, 1))

            if data_cfg.output_subdir is not None:
                # validation
                if save_eval:
                    iter_naming = f'{it:09d}'
                else:
                    iter_naming = 'val-cache'
                audio_dir = self.log.log_audio(iter_naming,
                                               f'{video_id}',
                                               audio[i],
                                               it=None,
                                               sample_rate=self.sample_rate,
                                               subdir=Path(data_cfg.output_subdir))
                if save_eval and i == 0:
                    self.val_video_joiner.join(video_id, f'{iter_naming}-{video_id}',
                                               audio[i].transpose(0, 1))
            else:
                # full test set, usually
                audio_dir = self.log.log_audio(f'{data_cfg.tag}-sampled',
                                               f'{video_id}',
                                               audio[i],
                                               it=None,
                                               sample_rate=self.sample_rate)

    return Path(audio_dir)
|
| 799 |
+
|
| 800 |
+
@torch.inference_mode()
def eval(self, audio_dir: Path, it: int, data_cfg: DictConfig) -> dict[str, float]:
    """Extract features from generated audio and compute metrics vs. ground truth.

    All ranks synchronize on a barrier; the actual extraction/evaluation
    runs on rank 0 only, outside of AMP.

    Args:
        audio_dir: directory of generated audio (as returned by inference_pass).
        it: global iteration, used for scalar logging.
        data_cfg: provides `gt_cache` (ground-truth feature cache) and `tag`.

    Returns:
        Metric name -> value dict on rank 0; None on every other rank
        (despite the dict[str, float] annotation).
    """
    # rank 0 model only, outside of AMP
    info_if_rank_zero(self.log, 'Eval: entering barrier')
    torch.distributed.barrier()
    info_if_rank_zero(self.log, 'Eval: barrier resolved')
    if local_rank == 0:
        # cache features of the predicted audio next to the audio itself
        extract(audio_path=audio_dir,
                output_path=audio_dir / 'cache',
                device='cuda',
                batch_size=32,
                audio_length=8)
        output_metrics = evaluate(gt_audio_cache=Path(data_cfg.gt_cache),
                                  pred_audio_cache=audio_dir / 'cache')
        for k, v in output_metrics.items():
            # pad k to 10 characters
            # pad v to 10 decimal places
            self.log.log_scalar(f'{data_cfg.tag}/{k}', v, it)
            self.log.info(f'{data_cfg.tag}/{k:<10}: {v:.10f}')
    else:
        output_metrics = None

    return output_metrics
|
| 823 |
+
|
| 824 |
+
def save_weights(self, it, save_copy=False):
    """Persist the raw network weights to disk (rank 0 only).

    Always (re)writes '{exp_id}_last.pth', first demoting any existing
    "last" file to a "shadow" backup. When save_copy is True, an extra
    iteration-tagged copy '{exp_id}_{it}.pth' is written as well.
    """
    if local_rank != 0:
        return

    os.makedirs(self.run_path, exist_ok=True)
    state = self.network.module.state_dict()

    # optional immutable, iteration-tagged copy
    if save_copy:
        tagged_path = self.run_path / f'{self.exp_id}_{it}.pth'
        torch.save(state, tagged_path)
        self.log.info(f'Network weights saved to {tagged_path}.')

    # if last exists, move it to a shadow copy before overwriting
    last_path = self.run_path / f'{self.exp_id}_last.pth'
    if last_path.exists():
        backup_path = last_path.with_name(last_path.name.replace('last', 'shadow'))
        last_path.replace(backup_path)
        self.log.info(f'Network weights shadowed to {backup_path}.')

    torch.save(state, last_path)
    self.log.info(f'Network weights saved to {last_path}.')
|
| 843 |
+
|
| 844 |
+
def save_checkpoint(self, it, save_copy=False):
    """Persist the full training state (rank 0 only).

    The checkpoint bundles iteration, network weights, optimizer,
    scheduler, and EMA states. Always (re)writes '{exp_id}_ckpt_last.pth'
    (shadowing any previous one); optionally also an iteration-tagged copy.
    """
    if local_rank != 0:
        return

    ema_state = self.ema.state_dict() if self.ema is not None else None
    checkpoint = {
        'it': it,
        'weights': self.network.module.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'scheduler': self.scheduler.state_dict(),
        'ema': ema_state,
    }

    os.makedirs(self.run_path, exist_ok=True)
    if save_copy:
        tagged_path = self.run_path / f'{self.exp_id}_ckpt_{it}.pth'
        torch.save(checkpoint, tagged_path)
        self.log.info(f'Checkpoint saved to {tagged_path}.')

    # if ckpt_last exists, move it to a shadow copy before overwriting
    last_path = self.run_path / f'{self.exp_id}_ckpt_last.pth'
    if last_path.exists():
        backup_path = last_path.with_name(last_path.name.replace('last', 'shadow'))
        last_path.replace(backup_path)  # moves the file
        self.log.info(f'Checkpoint shadowed to {backup_path}.')

    torch.save(checkpoint, last_path)
    self.log.info(f'Checkpoint saved to {last_path}.')
|
| 871 |
+
|
| 872 |
+
def get_latest_checkpoint_path(self):
    """Return the path of the most recent full checkpoint, or None if absent."""
    candidate = self.run_path / f'{self.exp_id}_ckpt_last.pth'
    if candidate.exists():
        return candidate
    info_if_rank_zero(self.log, f'No checkpoint found at {candidate}.')
    return None
|
| 878 |
+
|
| 879 |
+
def get_latest_weight_path(self):
    """Return the path of the most recent weight-only file, or None if absent."""
    candidate = self.run_path / f'{self.exp_id}_last.pth'
    if candidate.exists():
        return candidate
    self.log.info(f'No weight found at {candidate}.')
    return None
|
| 885 |
+
|
| 886 |
+
def get_final_ema_weight_path(self):
    """Return the path of the final EMA weights, or None if absent."""
    candidate = self.run_path / f'{self.exp_id}_ema_final.pth'
    if candidate.exists():
        return candidate
    self.log.info(f'No weight found at {candidate}.')
    return None
|
| 892 |
+
|
| 893 |
+
def load_checkpoint(self, path):
    """Restore full training state from a checkpoint; used to resume training.

    Loads network weights, optimizer, scheduler, and (if enabled) EMA
    states in place, remapping tensors saved on cuda:0 to this rank's GPU.

    Args:
        path: checkpoint file produced by save_checkpoint.

    Returns:
        The global iteration stored in the checkpoint.
    """
    map_location = 'cuda:%d' % local_rank
    # weights_only=True avoids unpickling arbitrary objects from disk
    checkpoint = torch.load(path, map_location={'cuda:0': map_location}, weights_only=True)

    it = checkpoint['it']
    weights = checkpoint['weights']
    optimizer = checkpoint['optimizer']
    scheduler = checkpoint['scheduler']
    if self.ema is not None:
        self.ema.load_state_dict(checkpoint['ema'])
        self.log.info(f'EMA states loaded from step {self.ema.step}')

    # (removed a duplicate, unused reassignment of map_location here)
    self.network.module.load_state_dict(weights)
    self.optimizer.load_state_dict(optimizer)
    self.scheduler.load_state_dict(scheduler)

    self.log.info(f'Global iteration {it} loaded.')
    self.log.info('Network weights, optimizer states, and scheduler states loaded.')

    return it
|
| 915 |
+
|
| 916 |
+
def load_weights_in_memory(self, src_dict):
    """Load network weights from an already-deserialized state dict."""
    target = self.network.module
    target.load_weights(src_dict)
    self.log.info('Network weights loaded from memory.')
|
| 919 |
+
|
| 920 |
+
def load_weights(self, path):
    """Load only the network weights from disk (e.g. a pretrained model).

    Unlike load_checkpoint, optimizer/scheduler/EMA states are untouched.
    """
    device_str = 'cuda:%d' % local_rank
    state = torch.load(path, map_location={'cuda:0': device_str}, weights_only=True)

    self.log.info(f'Importing network weights from {path}...')
    self.load_weights_in_memory(state)
|
| 927 |
+
|
| 928 |
+
def weights(self):
    """Return the state dict of the wrapped (non-DDP) network."""
    inner = self.network.module
    return inner.state_dict()
|
| 930 |
+
|
| 931 |
+
def enter_train(self):
    """Switch to training mode and route scalars to the train integrator."""
    self.network.train()
    self.integrator = self.train_integrator
    return self
|
| 935 |
+
|
| 936 |
+
def enter_val(self):
    """Switch the network to evaluation mode; returns self for chaining."""
    net = self.network
    net.eval()
    return self
|
reward_models/av_align.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AV-Align Metric: Audio-Video Alignment Evaluation
|
| 3 |
+
|
| 4 |
+
AV-Align is a metric for evaluating the alignment between audio and video modalities in multimedia data.
|
| 5 |
+
It assesses synchronization by detecting audio and video peaks and calculating their Intersection over Union (IoU).
|
| 6 |
+
A higher IoU score indicates better alignment.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
- Provide a folder of video files as input.
|
| 10 |
+
- The script calculates the AV-Align score for the set of videos.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import argparse
|
| 14 |
+
import glob
|
| 15 |
+
import os
|
| 16 |
+
|
| 17 |
+
import cv2
|
| 18 |
+
import librosa
|
| 19 |
+
import librosa.display
|
| 20 |
+
|
| 21 |
+
import multiprocessing
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
import pandas as pd
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Function to extract frames from a video file
|
| 27 |
+
def extract_frames(video_path):
    """
    Extract all frames from a video file.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        frames (list): Every decoded frame, in order.
        frame_rate (float): Frame rate reported by the container.

    Raises:
        ValueError: if the video cannot be opened.
    """
    capture = cv2.VideoCapture(video_path)
    frame_rate = capture.get(cv2.CAP_PROP_FPS)

    if not capture.isOpened():
        raise ValueError(f"Error: Unable to open the video file. Wrong video is {video_path}")

    collected = []
    while True:
        ok, image = capture.read()
        if not ok:
            break
        collected.append(image)
    capture.release()
    return collected, frame_rate
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# Function to detect audio peaks using the Onset Detection algorithm
|
| 55 |
+
def detect_audio_peaks(audio_file):
    """
    Detect audio peaks using librosa's onset-detection pipeline.

    Args:
        audio_file (str): Path to the audio file.

    Returns:
        Times (in seconds) where audio onsets occur.
    """
    signal, rate = librosa.load(audio_file)
    # onset strength envelope -> onset frames -> onset times
    envelope = librosa.onset.onset_strength(y=signal, sr=rate)
    peak_frames = librosa.onset.onset_detect(onset_envelope=envelope, sr=rate)
    return librosa.frames_to_time(peak_frames, sr=rate)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Function to find local maxima in a list
|
| 75 |
+
def find_local_max_indexes(arr, fps):
    """
    Return the times (in seconds) of strict local maxima in a sequence.

    An element is a local maximum when it is strictly greater than both
    of its neighbors; the first and last elements are never candidates.

    Args:
        arr (list): Values to scan.
        fps (float): Frames per second, used to convert indexes to time.

    Returns:
        list: Times (seconds) where local maxima occur.
    """
    return [idx / fps
            for idx in range(1, len(arr) - 1)
            if arr[idx] > arr[idx - 1] and arr[idx] > arr[idx + 1]]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Function to detect video peaks using Optical Flow
|
| 97 |
+
def detect_video_peaks(frames, fps):
    """
    Detect motion peaks in a video using dense optical flow.

    The average flow magnitude between consecutive frames forms a
    trajectory; its strict local maxima are reported as peaks.  The first
    pairwise flow value is duplicated at the front so the trajectory has
    one entry per frame and index / fps lines up with frame timestamps.

    Args:
        frames (list): Decoded video frames (BGR images).
        fps (float): Frame rate of the video.

    Returns:
        tuple:
            flow_trajectory (list): Average flow magnitude per frame.
            video_peaks (list): Times (in seconds) of local motion maxima.
    """
    if len(frames) < 2:
        # No consecutive pair exists, so no flow can be computed
        # (the original raised IndexError here).
        return [], []
    # Compute each consecutive-pair flow exactly once; the original
    # formulation evaluated compute_of(frames[0], frames[1]) twice.
    pairwise = [compute_of(frames[i - 1], frames[i]) for i in range(1, len(frames))]
    flow_trajectory = [pairwise[0]] + pairwise
    video_peaks = find_local_max_indexes(flow_trajectory, fps)
    return flow_trajectory, video_peaks
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Function to compute the optical flow magnitude between two frames
|
| 116 |
+
def compute_of(img1, img2):
    """
    Average dense optical-flow magnitude between two consecutive frames.

    Args:
        img1 (numpy.ndarray): Earlier BGR frame.
        img2 (numpy.ndarray): Later BGR frame.

    Returns:
        float: Mean magnitude of the Farneback flow field.
    """
    gray_prev = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray_next = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Farneback dense optical flow with the standard parameter set.
    flow_field = cv2.calcOpticalFlowFarneback(gray_prev, gray_next, None,
                                              0.5, 3, 15, 3, 5, 1.2, 0)
    flow_magnitude = cv2.magnitude(flow_field[..., 0], flow_field[..., 1])
    return cv2.mean(flow_magnitude)[0]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Function to calculate Intersection over Union (IoU) for audio and video peaks
|
| 139 |
+
def calc_intersection_over_union(audio_peaks, video_peaks, fps):
    """
    Intersection over Union (IoU) between audio and video peak sets.

    An audio peak counts as intersecting when it falls strictly within one
    frame duration (1/fps) of some video peak; each audio peak is matched
    at most once (the inner loop breaks on first match).

    Args:
        audio_peaks (list): Audio peak times (in seconds).
        video_peaks (list): Video peak times (in seconds).
        fps (float): Frame rate of the video.

    Returns:
        float: IoU score in [0, 1]; 0.0 when both peak lists are empty.
    """
    # Guard: with no peaks at all the union is empty -- define IoU as 0.0
    # (the original expression raised ZeroDivisionError in this case).
    if not audio_peaks and not video_peaks:
        return 0.0
    tolerance = 1 / fps
    intersection_length = 0
    for audio_peak in audio_peaks:
        for video_peak in video_peaks:
            if video_peak - tolerance < audio_peak < video_peak + tolerance:
                intersection_length += 1
                break
    return intersection_length / (len(audio_peaks) + len(video_peaks) - intersection_length)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def calculate_av_align_each_pair(video_path, audio_path):
    """
    Compute the AV-Align score for a single (video, audio) pair.

    Args:
        video_path (str): Path to the video file.
        audio_path (str): Path to the audio file.

    Returns:
        float: IoU-based AV-Align score, or -1 when the computation fails.
    """
    try:
        frames, fps = extract_frames(video_path)
        audio_peaks = detect_audio_peaks(audio_path)
        flow_trajectory, video_peaks = detect_video_peaks(frames, fps)
        score = calc_intersection_over_union(audio_peaks, video_peaks, fps)
        print(f'Calculated AV Align score for {os.path.basename(video_path)}')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # still propagate; per-item failures are reported via -1.
        score = -1
        print(f'Cannot Calculate AV Align score for {os.path.basename(video_path)}')

    return score
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def calculate_av_align_samples(video_path, audio_paths):
    """
    Score several generated audio tracks against a single reference video.

    :param video_path: only input the video path for each video id
    :param audio_paths: input the list of generated audio file path
    :return: score: list of av-align score
    """
    # Video peaks are extracted once and reused for every candidate audio.
    frames, fps = extract_frames(video_path)
    flow_trajectory, video_peaks = detect_video_peaks(frames, fps)

    score = []
    for candidate_audio in audio_paths:
        candidate_peaks = detect_audio_peaks(candidate_audio)
        score.append(calc_intersection_over_union(candidate_peaks, video_peaks, fps))

    print(f'Calculated AV Align score for {os.path.basename(video_path)}')
    return score
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def multi_wrapper(args):
    """Unpack a (video_path, audio_path) tuple for pool.imap dispatch."""
    video_path, audio_path = args
    return wrapper_cal_av_score(video_path, audio_path)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def wrapper_cal_av_score(video_path, audio_path):
    """
    Compute the AV-Align score for one pair and package it as a result row.

    Args:
        video_path (str): Path to the video file; the basename minus its
            4-character extension is used as the video id.
        audio_path (str): Path to the audio file.

    Returns:
        dict: {'video_id': ..., 'score': ...}; score is -1 on failure.
    """
    video_id = os.path.basename(video_path)[:-4]
    try:
        frames, fps = extract_frames(video_path)
        audio_peaks = detect_audio_peaks(audio_path)
        flow_trajectory, video_peaks = detect_video_peaks(frames, fps)
        score = calc_intersection_over_union(audio_peaks, video_peaks, fps)
        print(f'Calculated AV Align score for {os.path.basename(video_path)}')
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C can still stop pool workers.
        score = -1
        print(f'Cannot Calculate AV Align score for {os.path.basename(video_path)}')

    data = {
        'video_id': video_id,
        'score': score,
    }
    return data
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
if __name__ == "__main__":

    threshold = 0.2
    # Dataset locations for the VGGSound filtering run (hard-coded).
    video_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/videos'
    audio_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/audios_vggsound'
    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/MMAudio/filter_dataset'

    audio_file_list = os.listdir(audio_dir)

    # Pair each audio file with the same-id .mp4 video.
    video_audio_list = []
    for audio_file in tqdm(audio_file_list):
        video_id = audio_file[:-4]
        video_file = video_id + '.mp4'
        video_path = os.path.join(video_dir, video_file)
        audio_path = os.path.join(audio_dir, audio_file)
        video_audio_list.append((video_path, audio_path))

    cores = multiprocessing.cpu_count()
    # Context manager guarantees the worker pool is terminated and joined;
    # the original created the pool and never called close()/join().
    with multiprocessing.Pool(processes=cores) as pool:
        results = list(tqdm(pool.imap(multi_wrapper, video_audio_list), total=len(video_audio_list)))

    output_df = pd.DataFrame(results)
    output_df.to_csv(os.path.join(output_dir, 'vggsound_av_align_score.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
reward_models/cavp.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import json
|
| 4 |
+
from torch.utils.data import Dataset, DataLoader, DistributedSampler
|
| 5 |
+
from cavp_preprocess.cavp_model import Extract_CAVP_Features
|
| 6 |
+
from argparse import Namespace
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
import pandas as pd
|
| 9 |
+
|
| 10 |
+
class VideoAudioDPO_Dataset(Dataset):
    """Pairs each group of generated audio files with its 4-fps CAVP video.

    Each entry of the backing JSON file is a list of sample dicts that
    share one ``video_id``; every dict contributes one audio path.
    """

    def __init__(self, json_path, root_dir, video_4fps_for_cavp_path):
        # root_dir: directory holding <video_id>/<audio_file> generated outputs.
        # json_path: JSON list-of-lists describing generated samples.
        # video_4fps_for_cavp_path: directory of 4-fps videos for CAVP.
        self.root_dir = root_dir
        self.json_path = json_path
        self.video_4fps_for_cavp_path = video_4fps_for_cavp_path
        with open(json_path, 'r') as f:
            self.data = json.load(f)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        each_data = self.data[index]
        num_samples = len(each_data)
        audio_files_list = []
        for i in range(num_samples):
            video_id = each_data[i]['video_id']
            audio_path = os.path.join(self.root_dir, video_id, each_data[i]['audio_path'])
            audio_files_list.append(audio_path)

        # BUG FIX: reusing the outer quote inside a single-quoted f-string
        # (f'{d['k']}') is a SyntaxError before Python 3.12 -- use double
        # quotes for the inner key.
        video_4fps_file = os.path.join(self.video_4fps_for_cavp_path, f'{each_data[0]["video_id"]}.mp4')

        return audio_files_list, video_4fps_file
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if __name__ == "__main__":

    # Paths for the current inference run; earlier runs kept commented for reference.
    ROOT = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos'

    json_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos.json'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos.json'

    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'

    video_4fps_for_cavp_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/videos_4fps_for_cavp'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/videos_4fps_for_cavp'

    ckpt_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/video2audio/Frieren-V2A/pretrained_encoders/cavp_epoch66.ckpt'

    max_duration = 8  # todo
    batch_max_length = int(max_duration * 62.5)

    # Mel-spectrogram settings consumed by Extract_CAVP_Features.
    audio_args = {
        'audio_sample_rate': 16000,
        'audio_num_mel_bins': 128,  # todo
        'fft_size': 1024,
        'win_size': 1024,
        'hop_size': 256,
        'fmin': 0,
        'fmax': 8000,
        'batch_max_length': batch_max_length,
        'mode': 'none',  # pad,none,
    }
    audio_args = Namespace(**audio_args)
    cavp_extractor = Extract_CAVP_Features(fps=4,
                                           batch_size=40,
                                           device=torch.device("cuda"),
                                           video_shape=(224, 224),
                                           ckpt_path=ckpt_path,
                                           audio_args=audio_args)

    with open(json_path, 'r') as f:
        data = json.load(f)

    saved_output_full = []
    saved_output_dpo = []

    for each_data in tqdm(data):
        # One item = several generated audios sharing a video id / caption.
        num_samples = len(each_data)
        audio_files_list = []
        for i in range(num_samples):
            video_id = each_data[i]['video_id']
            audio_path = os.path.join(ROOT, video_id, each_data[i]['audio_path'])
            audio_files_list.append(audio_path)

        # BUG FIX: nested single quotes inside a single-quoted f-string are a
        # SyntaxError before Python 3.12; use double quotes for the inner key.
        video_4fps_file_path = os.path.join(video_4fps_for_cavp_path, f'{each_data[0]["video_id"]}.mp4')

        video_features, mel_spec_features_list = cavp_extractor(video_4fps_file_path, audio_files_list)

        # Cosine similarity between the video features and each candidate audio.
        scores = []
        for mel_spec_features in mel_spec_features_list:
            each_score = torch.cosine_similarity(video_features, mel_spec_features, dim=-1)
            scores.append(each_score.item())

        # Wide row: one score column per generated audio (1-based keys).
        outputs = {
            'id': each_data[0]['video_id'],
            'label': each_data[0]['caption'],
        }
        for i in range(num_samples):
            outputs[f'{i + 1}'] = scores[i]

        # DPO row: 1-based indices of the best and worst scoring audios.
        dpo_outputs = {
            'id': each_data[0]['video_id'],
            'label': each_data[0]['caption'],
            'chosen': scores.index(max(scores)) + 1,
            'reject': scores.index(min(scores)) + 1
        }

        saved_output_full.append(outputs)
        saved_output_dpo.append(dpo_outputs)

    output_full_df = pd.DataFrame(saved_output_full)
    output_full_df.to_csv(os.path.join(output_dir, 'av_cavp_score.tsv'), sep='\t', index=False)

    output_dpo_df = pd.DataFrame(saved_output_dpo)
    output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_cavp.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
|
reward_models/clap.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append("..")
|
| 3 |
+
sys.path.append("../..")
|
| 4 |
+
import os
|
| 5 |
+
import argparse
|
| 6 |
+
import torch
|
| 7 |
+
import av_benchmark.av_bench.laion_clap as laion_clap
|
| 8 |
+
import json
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
@torch.no_grad()
def compute_clap(model, audio_files, text_data):
    """CLAP similarity between a batch of audio files and a batch of texts.

    Returns the (num_audio, num_text) matrix of embedding dot products.
    """
    audio_embeddings = model.get_audio_embedding_from_filelist(x=audio_files, use_tensor=True)
    text_embeddings = model.get_text_embedding(text_data, use_tensor=True)
    similarity = audio_embeddings @ text_embeddings.T
    return similarity
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if __name__ == "__main__":

    # Build the (non-fusion) LAION-CLAP scorer once and reuse it for every item.
    #device = 'cuda:0'
    clap_model = laion_clap.CLAP_Module(enable_fusion=False)
    #clap_model.to(device)
    clap_model.load_ckpt()
    clap_model.eval()

    ROOT = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos'

    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos'

    json_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos.json'

    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos.json'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos.json'

    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema'

    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'

    with open(json_path, 'r') as f:
        data = json.load(f)

    saved_output_full = []
    saved_output_dpo = []

    for each_data in tqdm(data):
        # One item = several generated audios sharing a video id / caption.
        audio_files_list = [
            os.path.join(ROOT, sample['video_id'], sample['audio_path'])
            for sample in each_data
        ]
        caption = [each_data[0]['caption']]

        clap_scores = compute_clap(clap_model, audio_files_list, caption)
        scores = [each_score.item() for each_score in clap_scores]

        # Wide row: one score column per generated audio (1-based keys).
        outputs = {
            'id': each_data[0]['video_id'],
            'label': each_data[0]['caption'],
        }
        for rank, value in enumerate(scores, start=1):
            outputs[f'{rank}'] = value

        # DPO row: 1-based indices of the best and worst scoring audios.
        dpo_outputs = {
            'id': each_data[0]['video_id'],
            'label': each_data[0]['caption'],
            'chosen': scores.index(max(scores)) + 1,
            'reject': scores.index(min(scores)) + 1
        }

        saved_output_full.append(outputs)
        saved_output_dpo.append(dpo_outputs)

    output_full_df = pd.DataFrame(saved_output_full)
    output_full_df.to_csv(os.path.join(output_dir, 'clap_score.tsv'), sep='\t', index=False)

    output_dpo_df = pd.DataFrame(saved_output_dpo)
    output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_clap.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
|
reward_models/clap_multi_gpu.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append("..")
|
| 3 |
+
sys.path.append("../..")
|
| 4 |
+
import os
|
| 5 |
+
import argparse
|
| 6 |
+
import torch
|
| 7 |
+
import av_benchmark.av_bench.laion_clap as laion_clap
|
| 8 |
+
import json
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
import pandas as pd
|
| 11 |
+
import multiprocessing
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Root directory of the generated outputs for the current run (each audio
# lives at ROOT/<video_id>/<audio_file>); earlier runs are kept below,
# commented out, for reference.
ROOT = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_2000samples_av_align_iter1_for_dpo_inference_ema/generated_videos'

# '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos'

#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos'
|
| 19 |
+
|
| 20 |
+
@torch.no_grad()
def compute_clap(model, audio_files, text_data):
    """Return the (num_audio x num_text) CLAP score matrix (embedding dot products)."""
    a_emb = model.get_audio_embedding_from_filelist(x=audio_files, use_tensor=True)
    t_emb = model.get_text_embedding(text_data, use_tensor=True)
    return torch.matmul(a_emb, t_emb.T)
|
| 26 |
+
|
| 27 |
+
def process_chunk(chunk, gpu_id, return_dict, process_id):
    """
    Process a chunk of the data on a specific GPU.

    Loads the CLAP model on the designated device, then for each item in the
    chunk computes per-sample CLAP scores (wide rows) plus a chosen/reject
    DPO pair, and stores both lists in return_dict.

    Args:
        chunk (list): Sub-list of the dataset; each item is a list of sample
            dicts sharing one video_id / caption.
        gpu_id (int): CUDA device index to run on.
        return_dict (dict): Manager dict handed back to the parent; keyed by
            process_id with value [full_rows, dpo_rows].
        process_id (int): Key under which this worker stores its results.
    """
    try:
        device = f"cuda:{gpu_id}"
        torch.cuda.set_device(device)
        print(f"Process {process_id}: Using device {device}")

        # Initialize the CLAP model on this GPU
        model = laion_clap.CLAP_Module(enable_fusion=False)
        model.to(device)
        model.load_ckpt()
        model.eval()

        saved_output_full = []
        saved_output_dpo = []

        for j, each_data in enumerate(tqdm(chunk, desc=f"GPU {gpu_id}")):
            num_samples = len(each_data)

            # Collect audio file paths and text data (using the first caption)
            audio_files_list = []
            for i in range(num_samples):
                video_id = each_data[i]['video_id']
                audio_path = os.path.join(ROOT, video_id, each_data[i]['audio_path'])
                audio_files_list.append(audio_path)

            text_data = [each_data[0]['caption']]

            try:
                clap_scores = compute_clap(model, audio_files_list, text_data)
            except Exception as e:
                # Best-effort: report and skip this item, keep the chunk going.
                print(f"Error processing item index {j} on GPU {gpu_id}: {e}")
                continue

            scores = [each_score.item() for each_score in clap_scores]

            outputs = {
                'id': each_data[0]['video_id'],
                'label': each_data[0]['caption'],
            }
            for i in range(num_samples):
                outputs[f'{i + 1}'] = scores[i]

            dpo_outputs = {
                'id': each_data[0]['video_id'],
                'label': each_data[0]['caption'],
                'chosen': scores.index(max(scores)) + 1,
                'reject': scores.index(min(scores)) + 1
            }

            saved_output_full.append(outputs)
            saved_output_dpo.append(dpo_outputs)

        return_dict[process_id] = [saved_output_full, saved_output_dpo]
        print(f"Process {process_id}: Completed processing on GPU {gpu_id}")
    except Exception as e:
        print(f"Process {process_id}: Error on GPU {gpu_id}: {e}")
        # BUG FIX: the failure sentinel must keep the [full, dpo] shape --
        # the parent indexes return_dict[i][0] and return_dict[i][1], so a
        # bare [] caused an IndexError during aggregation.
        return_dict[process_id] = [[], []]
|
| 98 |
+
|
| 99 |
+
def split_into_chunks(data, num_chunks):
    """
    Split data into num_chunks approximately equal, contiguous parts.

    The first num_chunks - 1 parts each hold len(data) // num_chunks items;
    the final part absorbs any remainder.
    """
    size = len(data) // num_chunks
    # Boundary indices: 0, size, 2*size, ..., with the last boundary pinned
    # to len(data) so the remainder lands in the final chunk.
    bounds = [i * size for i in range(num_chunks)] + [len(data)]
    return [data[bounds[i]:bounds[i + 1]] for i in range(num_chunks)]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
if __name__ == "__main__":

    # CUDA + multiprocessing requires the 'spawn' start method.
    multiprocessing.set_start_method('spawn')

    json_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_2000samples_av_align_iter1_for_dpo_inference_ema/generated_videos.json'

    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos.json'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos.json'

    output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_2000samples_av_align_iter1_for_dpo_inference_ema'

    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema'
    #'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema'

    with open(json_path, 'r') as f:
        data = json.load(f)

    # Check GPU availability and split data accordingly
    num_gpus = torch.cuda.device_count()

    print(f"Found {num_gpus} GPUs. Splitting data into {num_gpus} chunks.")
    chunks = split_into_chunks(data, num_gpus)

    # Create a manager dict to collect results from all processes
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    processes = []

    # One worker per GPU; worker i processes chunks[i] on cuda:i.
    for i in range(num_gpus):
        p = multiprocessing.Process(
            target=process_chunk,
            args=(chunks[i], i, return_dict, i)
        )
        processes.append(p)
        p.start()
        print(f"Started process {i} on GPU {i}")

    for p in processes:
        p.join()
        print(f"Process {p.pid} has finished.")

    # Aggregate all chunks back into a single list.
    combined_data_full = []
    combined_data_dpo = []
    for i in range(num_gpus):
        worker_result = return_dict.get(i)
        # ROBUSTNESS: a worker that crashed (or never stored a result) must
        # not abort aggregation -- the original indexed [i][0]/[i][1]
        # unconditionally and raised IndexError/KeyError on failure.
        if not worker_result or len(worker_result) != 2:
            print(f"Warning: no results from process {i}; skipping.")
            continue
        combined_data_full.extend(worker_result[0])
        combined_data_dpo.extend(worker_result[1])

    output_full_df = pd.DataFrame(combined_data_full)
    output_full_df.to_csv(os.path.join(output_dir, 'clap_score.tsv'), sep='\t', index=False)

    output_dpo_df = pd.DataFrame(combined_data_dpo)
    output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_clap.tsv'), sep='\t', index=False)

    print("Finished !!!")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
|
reward_models/ib_at_sync.sh
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Invoke ib_at_sync.py with hard-coded run paths.
# NOTE(review): "ib_at" presumably means ImageBind audio-text sync scoring --
# confirm against ib_at_sync.py before relying on this description.
# for val set
# json_path: per-run manifest of generated videos; video_path: directory of
# those generated videos; output_dir: where the score files are written.
json_path=/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos.json
video_path=/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos
output_dir=/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema
gt_batch_size=8

python ib_at_sync.py --video_path ${video_path} --json_path ${json_path} --output_dir ${output_dir} --gt_batch_size ${gt_batch_size} --audio_length=8
|
reward_models/ib_sync.py
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from colorlog import ColoredFormatter
|
| 7 |
+
from einops import rearrange
|
| 8 |
+
|
| 9 |
+
from ib_sync_rewards.imagebind.models import imagebind_model
|
| 10 |
+
from ib_sync_rewards.imagebind.models.imagebind_model import ModalityType
|
| 11 |
+
|
| 12 |
+
from ib_sync_rewards.imagebind.models.multimodal_preprocessors import SimpleTokenizer
|
| 13 |
+
from torch.utils.data import DataLoader
|
| 14 |
+
from tqdm import tqdm
|
| 15 |
+
|
| 16 |
+
from ib_sync_rewards.args import get_eval_parser
|
| 17 |
+
from ib_sync_rewards.data.video_dataset import VideoDataset, pad_or_truncate, error_avoidance_collate
|
| 18 |
+
from ib_sync_rewards.synchformer.synchformer import Synchformer, make_class_grid
|
| 19 |
+
|
| 20 |
+
import torchaudio
|
| 21 |
+
import json
|
| 22 |
+
import pandas as pd
|
| 23 |
+
|
| 24 |
+
# Pretrained Synchformer weights, resolved relative to this source file.
_syncformer_ckpt_path = Path(__file__).parent / 'ib_sync_rewards' / 'weights' / 'synchformer_state_dict.pth'
# Module-level logger; reconfigured by setup_eval_logging() below.
log = logging.getLogger()
# All models and tensors are placed on this device (GPU required).
device = 'cuda'

# colorlog format string: colorized level name followed by the message.
LOGFORMAT = "[%(log_color)s%(levelname)-8s%(reset)s]: %(log_color)s%(message)s%(reset)s"
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def setup_eval_logging(log_level: int = logging.INFO):
    """Configure the root logger with a colorized stream handler.

    Args:
        log_level: Minimum severity for both the root logger and the
            attached handler (defaults to ``logging.INFO``).
    """
    root = logging.getLogger()
    root.setLevel(log_level)
    # Idempotence guard: this module calls the function at import time, so a
    # second call (e.g. from a driver script) would otherwise stack a second
    # StreamHandler and print every record twice.
    if any(isinstance(h, logging.StreamHandler) for h in root.handlers):
        return
    stream = logging.StreamHandler()
    stream.setLevel(log_level)
    stream.setFormatter(ColoredFormatter(LOGFORMAT))
    root.addHandler(stream)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
setup_eval_logging()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def encode_video_with_sync(synchformer: Synchformer, x: torch.Tensor) -> torch.Tensor:
    """Extract Synchformer visual features from a batch of video clips.

    Args:
        synchformer: Pretrained Synchformer feature extractor.
        x: Frames shaped (B, T, C, H, W) with C == 3 and H == W == 224.

    Returns:
        Per-window visual features shaped (B, S, T', D), where S is the
        number of 16-frame windows taken with stride 8.
    """
    batch, num_frames, channels, height, width = x.shape
    assert channels == 3 and height == 224 and width == 224

    # Slice the frame axis into overlapping 16-frame windows (stride 8).
    win, hop = 16, 8
    n_windows = (num_frames - win) // hop + 1
    windows = [x[:, k * hop:k * hop + win] for k in range(n_windows)]
    x = torch.stack(windows, dim=1)  # (B, S, T, C, H, W)

    # Flatten batch and window axes so each window is encoded independently.
    x = rearrange(x, 'b s t c h w -> (b s) 1 t c h w')
    x = synchformer.extract_vfeats(x)
    return rearrange(x, '(b s) 1 t d -> b s t d', b=batch)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def encode_video_with_imagebind(imagebind: imagebind_model, x: torch.Tensor) -> torch.Tensor:
    """Extract ImageBind vision embeddings from 2-frame clips of a video.

    Args:
        imagebind: Pretrained ImageBind model.
        x: Frames shaped (B, NUM_CROPS, T, C, H, W).

    Returns:
        The ``ModalityType.VISION`` embeddings for all 2-frame clips.
    """
    _, _, num_frames, _, _, _ = x.shape
    # Every consecutive frame pair becomes one 2-frame clip; pairs are
    # concatenated along the crop axis:
    # B * (NUM_CROPS * (T - 1)) * 2 * C * H * W
    pairs = torch.cat([x[:, :, k:k + 2] for k in range(num_frames - 1)], dim=1)

    # ImageBind expects channels before the time axis.
    pairs = rearrange(pairs, 'b n t c h w -> b n c t h w')

    return imagebind({ModalityType.VISION: pairs})[ModalityType.VISION]
|
| 79 |
+
|
| 80 |
+
def encode_audio_with_sync(synchformer: Synchformer, x: torch.Tensor,
                           mel: torchaudio.transforms.MelSpectrogram) -> torch.Tensor:
    """Extract Synchformer audio features from raw waveforms.

    Args:
        synchformer: Pretrained Synchformer feature extractor.
        x: Waveforms shaped (B, T) in samples.
        mel: Mel-spectrogram transform applied to each audio window.

    Returns:
        Per-window audio features from ``synchformer.extract_afeats``.
    """
    _, num_samples = x.shape

    # Slice the waveform into 10240-sample windows with 50% overlap.
    win = 10240
    hop = win // 2
    n_windows = (num_samples - win) // hop + 1
    windows = [x[:, k * hop:k * hop + win] for k in range(n_windows)]
    x = torch.stack(windows, dim=1)  # (B, S, win)

    # Log-mel spectrogram, padded/truncated to 66 time frames per window.
    x = torch.log(mel(x) + 1e-6)
    x = pad_or_truncate(x, 66)

    # Normalization constants match the Synchformer training statistics.
    mean = -4.2677393
    std = 4.5689974
    x = (x - mean) / (2 * std)

    # x: (B, S, 128, 66) -> add a channel axis for the feature extractor.
    return synchformer.extract_afeats(x.unsqueeze(2))
|
| 103 |
+
|
| 104 |
+
@torch.inference_mode()
def extract(args):
    """Score generated audio samples with ImageBind and Synchformer rewards.

    For every sample index (multiple generations exist per source video), the
    function computes three per-video metrics:

    * ``ib_av_score``  — ImageBind audio-video cosine similarity (higher is better),
    * ``ib_at_score``  — ImageBind audio-text (caption) cosine similarity,
    * ``av_sync_score`` — mean absolute Synchformer offset over the first and
      last 14 feature segments (lower means better synchronization).

    The per-sample scores are then pivoted into wide tables (one column per
    sample index) and written to ``args.output_dir`` both as full score
    tables and as DPO chosen/reject tables (TSV).
    """
    video_path: Path = args.video_path.expanduser()  # folder of per-video subfolders
    json_path: Path = args.json_path  # JSON listing [{'video_id', 'caption'}, ...] entries
    output_dir: Path = args.output_dir  # destination for the TSV outputs
    audio_length: float = args.audio_length
    num_workers: int = args.num_workers
    batch_size: int = args.gt_batch_size

    log.info('Extracting features...')

    with open(json_path, 'r') as f:
        data = json.load(f)

    # Map video_id -> caption; captions are also the text input for the
    # ImageBind audio-text score.
    video_id_caption_dict = {}
    for each_data in data:
        video_id = each_data[0]['video_id']
        caption = each_data[0]['caption']
        video_id_caption_dict[video_id] = caption

    # Each entry under video_path is a folder named after the video id.
    video_names = os.listdir(video_path)
    video_name_paths = [video_path / f for f in video_names]

    # Sample indices ('1', '2', ...) sorted numerically so TSV columns come
    # out in order; every video is assumed to contain the same indices.
    samples_per_video = sorted(list(map(int, os.listdir(video_name_paths[0]))))
    samples_per_video = list(map(str, samples_per_video))

    #samples_per_video = os.listdir(video_name_paths[0])

    log.info(f'{len(video_name_paths)} videos found.')
    log.info(f'{len(samples_per_video)} samples are found in each video.')

    # Load the pretrained Synchformer used for the AV-sync metric.
    sync_model = Synchformer().to(device).eval()
    sd = torch.load(_syncformer_ckpt_path, weights_only=True)
    sync_model.load_state_dict(sd)

    # torch.compile wrappers were intentionally disabled; plain aliases kept.
    cmp_encode_video_with_sync = encode_video_with_sync  # torch.compile(encode_video_with_sync)
    cmp_encode_video_with_imagebind = encode_video_with_imagebind  # torch.compile(encode_video_with_imagebind)
    cmp_encode_audio_with_sync = encode_audio_with_sync

    # Pretrained ImageBind for the AV and AT cosine-similarity scores, plus
    # the BPE tokenizer for captions.
    imagebind = imagebind_model.imagebind_huge(pretrained=True).to(device).eval()
    tokenizer = SimpleTokenizer(bpe_path='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/reward_models/ib_sync_rewards/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz')

    total_outputs = []
    for sample_idx in samples_per_video:
        log.info(f'Starting to extracting features in sample index: {sample_idx}')
        # Layout: <video_path>/<video_id>/<sample_idx>/<video_id>.{mp4,flac}
        video_paths = [f / sample_idx / f'{os.path.basename(f)}.mp4' for f in video_name_paths]
        audio_paths = [f / sample_idx / f'{os.path.basename(f)}.flac' for f in video_name_paths]

        log.info(f'{len(video_paths)} videos found.')

        dataset = VideoDataset(video_paths, audio_paths, duration_sec=audio_length, video_id_caption=video_id_caption_dict)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            collate_fn=error_avoidance_collate
                            )  # error_avoidance_collate skips samples that failed to load

        # video_id -> metrics dict for this sample index.
        output_for_each_sample_idx_dict = {}

        # NOTE(review): `data` shadows the JSON list loaded above; harmless
        # here because the JSON is no longer referenced past this point.
        for data in tqdm(loader):
            names = data['name']
            # Video tensors for the two backbones.
            ib_video = data['ib_video'].to(device)
            sync_video = data['sync_video'].to(device)

            # Audio tensors for the two backbones.
            ib_audio = data['ib_audio'].squeeze(1).to(device)
            sync_audio = data['sync_audio'].to(device)

            # Tokenize captions one by one, then batch them for ImageBind.
            ib_text = data['label']
            ib_text_tokens = [tokenizer(t).unsqueeze(0).to(device) for t in ib_text]
            ib_text_tokens = torch.cat(ib_text_tokens, dim=0)
            #ib_text_tokens = tokenizer(ib_text).to(device)
            ib_text_features = imagebind({ModalityType.TEXT: ib_text_tokens})[ModalityType.TEXT].cpu().detach()

            # Video feature extraction.
            sync_video_features = cmp_encode_video_with_sync(sync_model, sync_video)
            ib_video_features = cmp_encode_video_with_imagebind(imagebind, ib_video)
            ib_video_features = ib_video_features.cpu().detach()

            # Audio feature extraction.
            ib_audio_features = imagebind({ModalityType.AUDIO: ib_audio})[ModalityType.AUDIO].cpu().detach()
            sync_audio_features = cmp_encode_audio_with_sync(sync_model, sync_audio, dataset.sync_mel_spectrogram.to(device))

            # ImageBind audio-video cosine similarity, shape [B].
            ib_av_scores = torch.cosine_similarity(ib_video_features, ib_audio_features, dim=-1)

            # ImageBind audio-text cosine similarity, shape [B].
            ib_at_scores = torch.cosine_similarity(ib_text_features, ib_audio_features, dim=-1)

            # Synchformer desync: offsets are predicted over a 21-class grid
            # spanning [-2, 2] seconds; the score is the mean |offset| of the
            # first and last 14 segments.
            total_sync_scores = []
            sync_grid = make_class_grid(-2, 2, 21)
            logits_1 = sync_model.compare_v_a(sync_video_features[:, :14], sync_audio_features[:, :14])
            top_id_1 = torch.argmax(logits_1, dim=-1).cpu().numpy()

            logits_2 = sync_model.compare_v_a(sync_video_features[:, -14:], sync_audio_features[:, -14:])
            top_id_2 = torch.argmax(logits_2, dim=-1).cpu().numpy()

            for j in range(sync_video_features.shape[0]):
                total_sync_scores.append(0.5 * (abs(sync_grid[top_id_1[j]].item()) + abs(sync_grid[top_id_2[j]].item())))

            # Collect per-video metrics for this batch.
            for i, n in enumerate(names):
                each_output = {
                    'id': n,
                    'label': video_id_caption_dict[n],
                    'ib_av_score' : ib_av_scores[i].item(),
                    'av_sync_score': total_sync_scores[i],
                    'ib_at_score': ib_at_scores[i].item()
                }
                output_for_each_sample_idx_dict[n] = each_output

        total_outputs.append(output_for_each_sample_idx_dict)

    # Pivot per-sample-index results into wide tables keyed by video id.
    log.info('Combining and Saving Metrics...')
    saved_ib_output_full = []
    saved_sync_output_full = []
    saved_ib_at_output_full = []
    saved_ib_output_dpo = []
    saved_sync_output_dpo = []
    saved_ib_at_output_dpo = []

    video_id_list = total_outputs[0].keys()
    for video_id in tqdm(video_id_list):
        outputs_ib_metrics = {
            'id': video_id,
            'label': video_id_caption_dict[video_id]
        }

        outputs_sync_metrics = {
            'id': video_id,
            'label': video_id_caption_dict[video_id]
        }

        outputs_ib_at_metrics = {
            'id': video_id,
            'label': video_id_caption_dict[video_id]
        }

        ib_scores_for_each_video = []
        sync_scores_for_each_video = []
        ib_at_scores_for_each_video = []

        # Columns '1'..'N' hold the score of each generated sample.
        for idx, each_sample_idx_dict in enumerate(total_outputs):
            ib_scores_for_each_video.append(each_sample_idx_dict[video_id]['ib_av_score'])
            outputs_ib_metrics[str(idx+1)] = each_sample_idx_dict[video_id]['ib_av_score']

            sync_scores_for_each_video.append(each_sample_idx_dict[video_id]['av_sync_score'])
            outputs_sync_metrics[str(idx+1)] = each_sample_idx_dict[video_id]['av_sync_score']

            ib_at_scores_for_each_video.append(each_sample_idx_dict[video_id]['ib_at_score'])
            outputs_ib_at_metrics[str(idx+1)] = each_sample_idx_dict[video_id]['ib_at_score']

        # DPO pairs: for ImageBind scores higher is better (max chosen) ...
        outputs_ib_dpo = {
            'id': video_id,
            'label': video_id_caption_dict[video_id],
            'chosen': ib_scores_for_each_video.index(max(ib_scores_for_each_video)) + 1,
            'reject': ib_scores_for_each_video.index(min(ib_scores_for_each_video)) + 1
        }

        # ... while for desync LOWER is better, so min is chosen.
        outputs_sync_dpo = {
            'id': video_id,
            'label': video_id_caption_dict[video_id],
            'chosen': sync_scores_for_each_video.index(min(sync_scores_for_each_video)) + 1,
            'reject': sync_scores_for_each_video.index(max(sync_scores_for_each_video)) + 1
        }

        outputs_ib_at_dpo = {
            'id': video_id,
            'label': video_id_caption_dict[video_id],
            'chosen': ib_at_scores_for_each_video.index(max(ib_at_scores_for_each_video)) + 1,
            'reject': ib_at_scores_for_each_video.index(min(ib_at_scores_for_each_video)) + 1
        }

        saved_ib_output_full.append(outputs_ib_metrics)
        saved_sync_output_full.append(outputs_sync_metrics)
        saved_ib_at_output_full.append(outputs_ib_at_metrics)

        saved_ib_output_dpo.append(outputs_ib_dpo)
        saved_sync_output_dpo.append(outputs_sync_dpo)
        saved_ib_at_output_dpo.append(outputs_ib_at_dpo)

    # Persist full score tables and DPO pair tables as TSV.
    output_ib_full_df = pd.DataFrame(saved_ib_output_full)
    output_ib_full_df.to_csv(os.path.join(output_dir, 'imagebind_score.tsv'), sep='\t', index=False)

    output_ib_dpo_df = pd.DataFrame(saved_ib_output_dpo)
    output_ib_dpo_df.to_csv(os.path.join(output_dir, 'dpo_imagebind.tsv'), sep='\t', index=False)

    output_sync_full_df = pd.DataFrame(saved_sync_output_full)
    output_sync_full_df.to_csv(os.path.join(output_dir, 'desync_score.tsv'), sep='\t', index=False)

    output_sync_dpo_df = pd.DataFrame(saved_sync_output_dpo)
    output_sync_dpo_df.to_csv(os.path.join(output_dir, 'dpo_desync.tsv'), sep='\t', index=False)

    output_ib_at_full_df = pd.DataFrame(saved_ib_at_output_full)
    output_ib_at_full_df.to_csv(os.path.join(output_dir, 'imagebind_at_score.tsv'), sep='\t', index=False)

    output_ib_at_dpo_df = pd.DataFrame(saved_ib_at_output_dpo)
    output_ib_at_dpo_df.to_csv(os.path.join(output_dir, 'dpo_imagebind_at.tsv'), sep='\t', index=False)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Shared evaluation CLI (json_path, output_dir, audio_length, ...) plus
    # the script-specific path to the generated-video folder.
    parser = get_eval_parser()
    parser.add_argument('--video_path', type=Path, required=True, help='Path to the video files')
    args = parser.parse_args()
    extract(args)
|
reward_models/ib_sync.sh
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Score validation-set generations with the ImageBind + Synchformer reward
# model (ib_sync.py). The commented-out paths below are alternative inference
# runs kept for convenience; uncomment the one you want.

json_path='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos.json'

#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema/generated_videos.json'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema/generated_videos.json'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema/generated_videos.json'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_desync_to_ib_iter1_for_dpo_inference_ema/generated_videos.json'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos.json'
#/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos.json

video_path='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema/generated_videos'

#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema/generated_videos'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema/generated_videos'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema/generated_videos'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_desync_to_ib_iter1_for_dpo_inference_ema/generated_videos'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/generated_videos'
#/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/generated_videos

output_dir='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema'

#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema'
#/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema
gt_batch_size=8

# Quote expansions so paths containing spaces or glob characters survive
# word splitting intact.
python ib_sync.py --video_path "${video_path}" --json_path "${json_path}" --output_dir "${output_dir}" --gt_batch_size "${gt_batch_size}" --audio_length=8
|
reward_models/multi_reward_ib_av_at_desync_models.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Combine ImageBind AV, ImageBind AT, and desync scores into one reward.

Reads the three per-sample score tables produced by ib_sync.py / ib_at_sync.py,
forms a weighted sum per sample, and writes both the combined score table and
a DPO chosen/reject table as TSV files.
"""
import os
import pandas as pd

# Input/output paths for one inference run; commented alternatives are other
# experiment runs kept for convenience.
imagebind_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema/imagebind_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema/imagebind_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/imagebind_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema/imagebind_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/imagebind_score.tsv'

imagebind_at_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema/imagebind_at_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema/imagebind_at_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/imagebind_at_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema/imagebind_at_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/imagebind_at_score.tsv'

desync_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema/desync_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema/desync_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/desync_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema/desync_score.tsv'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema/desync_score.tsv'

output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter2_steps5k_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-gen_dpo_data_May17_lumina_v2a_two_stream_depth16_caption_beta20000_full_reward_ib_desync_iter1_steps5k_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_May10_depth16_caption_2000samples_full_reward_ib_desync_iter2_for_dpo_inference_ema'
#'/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_April12_depth16_caption_10audio_per_video_inference_ema'

# Number of generated samples per source video (score columns '1'..'5').
num_samples = 5

# 'id' is read as str so numeric-looking video ids keep leading zeros.
imagebind_df_list = pd.read_csv(imagebind_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
desync_df_list = pd.read_csv(desync_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
imagebind_at_df_list = pd.read_csv(imagebind_at_csv_path, sep='\t', dtype={'id': str}).to_dict('records')

# Align the audio-text records to the imagebind ordering with an id-keyed
# lookup (O(n)) instead of the original nested scan (O(n^2)); ids are
# assumed unique per table.
at_record_by_id = {rec['id']: rec for rec in imagebind_at_df_list}
imagebind_at_df_list_new = [at_record_by_id[rec['id']]
                            for rec in imagebind_df_list
                            if rec['id'] in at_record_by_id]

assert len(imagebind_df_list) == len(desync_df_list) == len(imagebind_at_df_list_new)

multi_reward_output_list = []
dpo_multi_reward_output_list = []

for idx, record in enumerate(imagebind_df_list):
    video_id = record['id']  # renamed from `id` to avoid shadowing the builtin
    assert video_id == desync_df_list[idx]['id'] == imagebind_at_df_list_new[idx]['id']
    label = record['label']
    output = {
        'id': video_id,
        'label': label
    }

    scores = []
    for i in range(num_samples):
        col = str(i + 1)
        # Weighted multi-reward: ImageBind AV (+1), ImageBind AT (weight 0,
        # currently disabled), minus the desync penalty (lower desync is
        # better, hence the negative sign).
        sum_reward_score = 1 * record[col] + 0 * imagebind_at_df_list_new[idx][col] - 1 * desync_df_list[idx][col]
        output[col] = sum_reward_score
        scores.append(sum_reward_score)

    multi_reward_output_list.append(output)

    # DPO pair: the highest combined reward is chosen, the lowest rejected.
    dpo_outputs = {
        'id': video_id,
        'label': label,
        'chosen': scores.index(max(scores)) + 1,
        'reject': scores.index(min(scores)) + 1
    }
    dpo_multi_reward_output_list.append(dpo_outputs)

output_full_df = pd.DataFrame(multi_reward_output_list)
output_full_df.to_csv(os.path.join(output_dir, 'full_reward_ib_desync.tsv'), sep='\t', index=False)

output_dpo_df = pd.DataFrame(dpo_multi_reward_output_list)
output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_full_reward_ib_desync.tsv'), sep='\t', index=False)

print("Finished !!!")
|
| 93 |
+
|
reward_models/multi_reward_ib_desync_models.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
|
| 4 |
+
imagebind_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/imagebind_score.tsv'
|
| 5 |
+
|
| 6 |
+
desync_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/desync_score.tsv'
|
| 7 |
+
|
| 8 |
+
output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'
|
| 9 |
+
|
| 10 |
+
num_samples = 5
|
| 11 |
+
|
| 12 |
+
imagebind_df_list = pd.read_csv(imagebind_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 13 |
+
desync_df_list = pd.read_csv(desync_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 14 |
+
|
| 15 |
+
assert len(imagebind_df_list) == len(desync_df_list)
|
| 16 |
+
|
| 17 |
+
multi_reward_output_list = []
|
| 18 |
+
dpo_multi_reward_output_list = []
|
| 19 |
+
|
| 20 |
+
for idx, record in enumerate(imagebind_df_list):
|
| 21 |
+
id = record['id']
|
| 22 |
+
assert id == desync_df_list[idx]['id']
|
| 23 |
+
label = record['label']
|
| 24 |
+
output = {
|
| 25 |
+
'id': id,
|
| 26 |
+
'label': label
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
scores = []
|
| 30 |
+
for i in range(num_samples):
|
| 31 |
+
sum_reward_score = 0.5 * (record[f'{str(i+1)}'] - 2 * desync_df_list[idx][f'{str(i+1)}'])
|
| 32 |
+
output[f'{str(i+1)}'] = sum_reward_score
|
| 33 |
+
scores.append(sum_reward_score)
|
| 34 |
+
|
| 35 |
+
multi_reward_output_list.append(output)
|
| 36 |
+
|
| 37 |
+
dpo_outputs = {
|
| 38 |
+
'id': id,
|
| 39 |
+
'label': label,
|
| 40 |
+
'chosen': scores.index(max(scores)) + 1,
|
| 41 |
+
'reject': scores.index(min(scores)) + 1
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
dpo_multi_reward_output_list.append(dpo_outputs)
|
| 45 |
+
|
| 46 |
+
output_full_df = pd.DataFrame(multi_reward_output_list)
|
| 47 |
+
output_full_df.to_csv(os.path.join(output_dir, 'full_reward_ib_2desync_score.tsv'), sep='\t', index=False)
|
| 48 |
+
|
| 49 |
+
output_dpo_df = pd.DataFrame(dpo_multi_reward_output_list)
|
| 50 |
+
output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_full_reward_ib_2desync.tsv'), sep='\t', index=False)
|
| 51 |
+
|
| 52 |
+
print("Finished !!!")
|
| 53 |
+
|
reward_models/multi_reward_models.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
|
| 4 |
+
av_align_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/av_align_score.tsv'
|
| 5 |
+
cavp_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/av_cavp_score.tsv'
|
| 6 |
+
clap_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/clap_score.tsv'
|
| 7 |
+
output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'
|
| 8 |
+
num_samples = 5
|
| 9 |
+
|
| 10 |
+
av_align_df_list = pd.read_csv(av_align_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 11 |
+
cavp_df_list = pd.read_csv(cavp_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 12 |
+
clap_df_list = pd.read_csv(clap_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 13 |
+
|
| 14 |
+
assert len(av_align_df_list) == len(cavp_df_list) == len(clap_df_list)
|
| 15 |
+
|
| 16 |
+
multi_reward_output_list = []
|
| 17 |
+
dpo_multi_reward_output_list = []
|
| 18 |
+
|
| 19 |
+
for idx, record in enumerate(av_align_df_list):
|
| 20 |
+
id = record['id']
|
| 21 |
+
assert id == cavp_df_list[idx]['id'] == clap_df_list[idx]['id']
|
| 22 |
+
label = record['label']
|
| 23 |
+
output = {
|
| 24 |
+
'id': id,
|
| 25 |
+
'label': label
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
scores = []
|
| 29 |
+
for i in range(num_samples):
|
| 30 |
+
sum_reward_score = record[f'{str(i+1)}'] + cavp_df_list[idx][f'{str(i+1)}'] + clap_df_list[idx][f'{str(i+1)}']
|
| 31 |
+
output[f'{str(i+1)}'] = sum_reward_score
|
| 32 |
+
scores.append(sum_reward_score)
|
| 33 |
+
|
| 34 |
+
multi_reward_output_list.append(output)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
dpo_outputs = {
|
| 38 |
+
'id': id,
|
| 39 |
+
'label': label,
|
| 40 |
+
'chosen': scores.index(max(scores)) + 1,
|
| 41 |
+
'reject': scores.index(min(scores)) + 1
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
dpo_multi_reward_output_list.append(dpo_outputs)
|
| 45 |
+
|
| 46 |
+
output_full_df = pd.DataFrame(multi_reward_output_list)
|
| 47 |
+
output_full_df.to_csv(os.path.join(output_dir, 'full_reward_score.tsv'), sep='\t', index=False)
|
| 48 |
+
|
| 49 |
+
output_dpo_df = pd.DataFrame(dpo_multi_reward_output_list)
|
| 50 |
+
output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_full_reward.tsv'), sep='\t', index=False)
|
| 51 |
+
|
| 52 |
+
print("Finished !!!")
|
| 53 |
+
|
reward_models/multi_reward_models_new.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
|
| 4 |
+
av_align_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/av_align_score.tsv'
|
| 5 |
+
cavp_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/av_cavp_score.tsv'
|
| 6 |
+
clap_csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema/clap_score.tsv'
|
| 7 |
+
output_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/small_44k/vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema'
|
| 8 |
+
num_samples = 5
|
| 9 |
+
threshold = 0.3
|
| 10 |
+
|
| 11 |
+
av_align_df_list = pd.read_csv(av_align_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 12 |
+
cavp_df_list = pd.read_csv(cavp_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 13 |
+
clap_df_list = pd.read_csv(clap_csv_path, sep='\t', dtype={'id': str}).to_dict('records')
|
| 14 |
+
|
| 15 |
+
assert len(av_align_df_list) == len(cavp_df_list) == len(clap_df_list)
|
| 16 |
+
|
| 17 |
+
multi_reward_output_list = []
|
| 18 |
+
dpo_multi_reward_output_list = []
|
| 19 |
+
|
| 20 |
+
for idx, record in enumerate(av_align_df_list):
|
| 21 |
+
id = record['id']
|
| 22 |
+
assert id == cavp_df_list[idx]['id'] == clap_df_list[idx]['id']
|
| 23 |
+
label = record['label']
|
| 24 |
+
output = {
|
| 25 |
+
'id': id,
|
| 26 |
+
'label': label
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
scores = []
|
| 30 |
+
for i in range(num_samples):
|
| 31 |
+
sum_reward_score = 2*record[f'{str(i+1)}'] + cavp_df_list[idx][f'{str(i+1)}'] + 2*clap_df_list[idx][f'{str(i+1)}']
|
| 32 |
+
output[f'{str(i+1)}'] = sum_reward_score
|
| 33 |
+
scores.append(sum_reward_score)
|
| 34 |
+
|
| 35 |
+
multi_reward_output_list.append(output)
|
| 36 |
+
|
| 37 |
+
if (max(scores) - min(scores)) > threshold:
|
| 38 |
+
|
| 39 |
+
dpo_outputs = {
|
| 40 |
+
'id': id,
|
| 41 |
+
'label': label,
|
| 42 |
+
'chosen': scores.index(max(scores)) + 1,
|
| 43 |
+
'reject': scores.index(min(scores)) + 1
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
dpo_multi_reward_output_list.append(dpo_outputs)
|
| 47 |
+
|
| 48 |
+
print(f'the number of original samples is {len(multi_reward_output_list)}')
|
| 49 |
+
output_full_df = pd.DataFrame(multi_reward_output_list)
|
| 50 |
+
output_full_df.to_csv(os.path.join(output_dir, 'full_reward_score_2avalign_1cavp_2clap_thre03.tsv'), sep='\t', index=False)
|
| 51 |
+
|
| 52 |
+
print(f'the number of final samples is {len(dpo_multi_reward_output_list)}')
|
| 53 |
+
output_dpo_df = pd.DataFrame(dpo_multi_reward_output_list)
|
| 54 |
+
output_dpo_df.to_csv(os.path.join(output_dir, 'dpo_full_reward_2avalign_1cavp_2clap_thre03.tsv'), sep='\t', index=False)
|
| 55 |
+
|
| 56 |
+
print("Finished !!!")
|
| 57 |
+
|
runer_scripts_cmd.sh
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
1. OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 training/extract_video_training_latents.py > ./logs/extract_video_latents_16k.log 2>&1 &
|
| 3 |
+
|
| 4 |
+
2. huggingface cache dir: /root/.cache/huggingface/hub
|
| 5 |
+
|
| 6 |
+
2. torchhub cache dir: /root/.cache/torch/hub/checkpoints/
|
| 7 |
+
|
| 8 |
+
2. cache dir: /root/.cache/audioldm_eval/ckpt/Cnn14_16k_mAP=0.438.pth
|
| 9 |
+
|
| 10 |
+
training
|
| 11 |
+
3. OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 train.py exp_id=vgg_only_small_44k model=small_44k > ./logs/train_vgg_only_small_44k.log 2>&1 &
|
| 12 |
+
|
| 13 |
+
inference
|
| 14 |
+
4. OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval.py duration_s=8 dataset=vggsound model=small_44k num_workers=8 > ./logs/inference_vgg_only_small_44k.log 2>&1 &
|
| 15 |
+
|
| 16 |
+
demo example
|
| 17 |
+
5. CUDA_VISIBLE_DEVICES=0 python demo.py --variant="small_16k" --duration=4 --video='' --prompt ""
|
| 18 |
+
|
| 19 |
+
CUDA_VISIBLE_DEVICES=0 python demo.py --variant="small_44k" --duration=10 --video='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/example_demo_videos/_jB-IM_77lI_000000_silent.mp4' --prompt ""
|
| 20 |
+
|
| 21 |
+
CUDA_VISIBLE_DEVICES=2 python demo.py --variant="small_44k" --duration=4 --video='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/example_demo_videos/demo2.mp4' --prompt ""
|
| 22 |
+
|
| 23 |
+
6. moviegen
|
| 24 |
+
CUDA_VISIBLE_DEVICES=1 python demo.py --variant="small_44k" --duration=11 --video='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/example_demo_videos/moviegen/video1.mp4' --prompt ""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
7. audio waveforms for vggsound
|
| 28 |
+
/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/audios_vggsound
|
| 29 |
+
|
| 30 |
+
8. demo new for paper
|
| 31 |
+
CUDA_VISIBLE_DEVICES=2 python demo.py --variant="small_44k" --duration=4 --video='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/example_demo_videos/demo_new/.mp4' --prompt ""
|
| 32 |
+
|
| 33 |
+
9 training for vggsound with text caption
|
| 34 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 train.py exp_id=vgg_only_small_44k_caption_jan26 model=small_44k > ./logs/train_vgg_only_small_44k_caption_jan26.log 2>&1 &
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
10 generate data for dpo
|
| 38 |
+
(1) change eval_for_dpo_config.yaml
|
| 39 |
+
(2) change eval_data/base.yaml
|
| 40 |
+
|
| 41 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval_dpo.py duration_s=8 dataset=vggsound_dpo model=small_44k num_workers=8 > ./logs_dpo/inference_vgg_only_small_44k_new_model_lumina_v2a_two_stream_Feb28_depth16_caption_dpo.log 2>&1 &
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
11. nohup python dpo_training/create_dpo_file.py > ./logs/create_dop_file.log 2>&1 &
|
| 45 |
+
nohup python reward_models/cavp.py > ./logs/create_dop_cavp_file.log 2>&1 &
|
| 46 |
+
|
| 47 |
+
12. dpo training
|
| 48 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 train_dpo.py exp_id=vgg_only_small_44k model=small_44k > ./logs_dpo/train_vgg_only_small_44k.log 2>&1 &
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
13. extraction features for dpo
|
| 53 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 dpo_training/extract_video_training_latents.py > ./logs_dpo/extract_video_latents_44k_vggsound_dpo-new_model_lumina_v2a_two_stream_Mar1_depth16_caption_inference_ema_iter1_cavp.log 2>&1 &
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
14. after generating data for dpo, how to create dpo data file
|
| 57 |
+
(1) python ./dpo_training/generated_videos_file.py # get video json file
|
| 58 |
+
(2) python ./dpo_training/create_dpo_file.py # get av-align dpo file
|
| 59 |
+
(3) python ./reward_models/clap_multi_gpu.py (clap.py) # get clap dpo file
|
| 60 |
+
(4) python ./reward_models/cavp.py # get cavp dpo file, note the reencode_video.py should be runned first for 4fps video
|
| 61 |
+
|
| 62 |
+
# after that, extract video and audio features
|
| 63 |
+
(1) change dpo_training/extract_video_training_latents.py file
|
| 64 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 dpo_training/extract_video_training_latents.py > ./logs_dpo/extract_video_latents_44k_vggsound_dpo-new_model_lumina_v2a_two_stream_Mar12_depth16_caption_10000samples_inference_ema_iter1_cavp.log 2>&1 &
|
| 65 |
+
|
| 66 |
+
# dpo training # change config files: train_dpo_config.yaml / dpo_base_config.yaml / ./dpo_data/base.yaml
|
| 67 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 train_dpo.py exp_id=vgg_only_small_44k_lumina_v2a_two_stream_May7_depth16_caption_beta10000 model=small_44k > ./logs_dpo/train_vgg_only_small_44k_lumina_v2a_two_stream_May7_depth16_caption_beta10000.log 2>&1 &
|
| 68 |
+
|
| 69 |
+
# inference:
|
| 70 |
+
(1) change the 'model_path' in ./mmaudio/eval_utils.py file
|
| 71 |
+
(2) change the config files: eval_config.yaml, ./eval_data/base.yaml
|
| 72 |
+
(3) OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval.py duration_s=8 dataset=vggsound model=small_44k num_workers=8 > ./logs_dpo/inference_vgg_only_small_44k_dpo_iter1_cavp_May7_lumina_v2a_two_stream_depth16_caption_2000samples_beta10000.log 2>&1 &
|
| 73 |
+
|
| 74 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval.py duration_s=8 dataset=moviegen model=small_44k num_workers=8 > ./logs_dpo/lumina_v2a_moviegen_Sep24_inference_ema.log 2>&1 &
|
| 75 |
+
|
| 76 |
+
# evaluation
|
| 77 |
+
(1) change ./av-benchmark/evaluate.sh
|
| 78 |
+
(2) bash evaluate.sh
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 train_dpo.py exp_id=vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter3_steps5k model=small_44k > ./logs_dpo/train_vgg_only_small_44k_lumina_v2a_two_stream_May17_depth16_caption_beta20000_full_reward_ib_desync_iter3_steps5k.log 2>&1 &
|
| 82 |
+
|
| 83 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval.py duration_s=8 dataset=vggsound model=small_44k num_workers=8 > ./logs_dpo/inference_vgg_only_small_44k_dpo_May17_lumina_v2a_two_stream_depth16_caption_2000samples_beta20000_full_reward_ib_desync_iter3_steps5k.log 2>&1 &
|
| 84 |
+
|
| 85 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 batch_eval_dpo.py duration_s=8 dataset=vggsound_dpo model=small_44k num_workers=8 > ./logs_dpo/inference_vgg_only_small_44k_new_model_lumina_v2a_two_stream_May17_depth16_caption__beta20000_full_reward_ib_desync_iter3_steps5k_for_dpo.log 2>&1 &
|
| 86 |
+
|
| 87 |
+
OMP_NUM_THREADS=4 nohup torchrun --standalone --nproc_per_node=4 dpo_training/extract_video_training_latents.py > ./logs_dpo/extract_video_latents_44k_vggsound_dpo-new_model_lumina_v2a_two_stream_Mar17_depth16_caption_2000samples_steps5k_iter3_ib_desync.log 2>&1 &
|
train_dpo.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import logging
|
| 3 |
+
import math
|
| 4 |
+
import random
|
| 5 |
+
from datetime import timedelta
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
import hydra
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torch.distributed as distributed
|
| 12 |
+
from hydra import compose
|
| 13 |
+
from hydra.core.hydra_config import HydraConfig
|
| 14 |
+
from omegaconf import DictConfig, open_dict
|
| 15 |
+
from torch.distributed.elastic.multiprocessing.errors import record
|
| 16 |
+
|
| 17 |
+
from mmaudio.data.data_setup import setup_training_datasets, setup_val_datasets
|
| 18 |
+
from mmaudio.model.sequence_config import CONFIG_16K, CONFIG_44K
|
| 19 |
+
from mmaudio.runner import Runner
|
| 20 |
+
from mmaudio.sample import sample
|
| 21 |
+
from mmaudio.utils.dist_utils import info_if_rank_zero, local_rank, world_size
|
| 22 |
+
from mmaudio.utils.logger import TensorboardLogger
|
| 23 |
+
from mmaudio.utils.synthesize_ema import synthesize_ema, synthesize_ema_dpo
|
| 24 |
+
|
| 25 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 26 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 27 |
+
|
| 28 |
+
log = logging.getLogger()
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def distributed_setup():
|
| 32 |
+
distributed.init_process_group(backend="nccl", timeout=timedelta(hours=2))
|
| 33 |
+
log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}')
|
| 34 |
+
return local_rank, world_size
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@record
|
| 38 |
+
@hydra.main(version_base='1.3.2', config_path='config', config_name='train_dpo_config.yaml') # todo Mar 2
|
| 39 |
+
def train(cfg: DictConfig):
|
| 40 |
+
# initial setup
|
| 41 |
+
torch.cuda.set_device(local_rank)
|
| 42 |
+
torch.backends.cudnn.benchmark = cfg.cudnn_benchmark
|
| 43 |
+
distributed_setup()
|
| 44 |
+
num_gpus = world_size
|
| 45 |
+
run_dir = HydraConfig.get().run.dir
|
| 46 |
+
|
| 47 |
+
# compose early such that it does not rely on future hard disk reading
|
| 48 |
+
eval_cfg = compose('eval_config', overrides=[f'exp_id={cfg.exp_id}'])
|
| 49 |
+
|
| 50 |
+
# patch data dim
|
| 51 |
+
if cfg.model.endswith('16k'):
|
| 52 |
+
seq_cfg = CONFIG_16K
|
| 53 |
+
elif cfg.model.endswith('44k'):
|
| 54 |
+
seq_cfg = CONFIG_44K
|
| 55 |
+
else:
|
| 56 |
+
raise ValueError(f'Unknown model: {cfg.model}')
|
| 57 |
+
with open_dict(cfg):
|
| 58 |
+
cfg.data_dim.latent_seq_len = seq_cfg.latent_seq_len
|
| 59 |
+
cfg.data_dim.clip_seq_len = seq_cfg.clip_seq_len
|
| 60 |
+
cfg.data_dim.sync_seq_len = seq_cfg.sync_seq_len
|
| 61 |
+
|
| 62 |
+
# wrap python logger with a tensorboard logger
|
| 63 |
+
log = TensorboardLogger(cfg.exp_id,
|
| 64 |
+
run_dir,
|
| 65 |
+
logging.getLogger(),
|
| 66 |
+
is_rank0=(local_rank == 0),
|
| 67 |
+
enable_email=cfg.enable_email and not cfg.debug)
|
| 68 |
+
|
| 69 |
+
info_if_rank_zero(log, f'All configuration: {cfg}')
|
| 70 |
+
info_if_rank_zero(log, f'Number of GPUs detected: {num_gpus}')
|
| 71 |
+
|
| 72 |
+
# number of dataloader workers
|
| 73 |
+
info_if_rank_zero(log, f'Number of dataloader workers (per GPU): {cfg.num_workers}')
|
| 74 |
+
|
| 75 |
+
# Set seeds to ensure the same initialization
|
| 76 |
+
torch.manual_seed(cfg.seed)
|
| 77 |
+
np.random.seed(cfg.seed)
|
| 78 |
+
random.seed(cfg.seed)
|
| 79 |
+
|
| 80 |
+
# setting up configurations
|
| 81 |
+
info_if_rank_zero(log, f'Training configuration: {cfg}')
|
| 82 |
+
cfg.batch_size //= num_gpus
|
| 83 |
+
info_if_rank_zero(log, f'Batch size (per GPU): {cfg.batch_size}')
|
| 84 |
+
|
| 85 |
+
# determine time to change max skip
|
| 86 |
+
total_iterations = cfg['num_iterations']
|
| 87 |
+
|
| 88 |
+
# setup datasets
|
| 89 |
+
dataset, sampler, loader = setup_training_datasets(cfg)
|
| 90 |
+
info_if_rank_zero(log, f'Number of training samples: {len(dataset)}')
|
| 91 |
+
info_if_rank_zero(log, f'Number of training batches: {len(loader)}')
|
| 92 |
+
|
| 93 |
+
val_dataset, val_loader, eval_loader = setup_val_datasets(cfg)
|
| 94 |
+
info_if_rank_zero(log, f'Number of val samples: {len(val_dataset)}')
|
| 95 |
+
val_cfg = cfg.dpo_data.ExtractedVGG_val
|
| 96 |
+
|
| 97 |
+
# compute and set mean and std
|
| 98 |
+
# latent_mean_chosen, latent_std_chosen, latent_mean_reject, latent_std_reject = dataset.compute_latent_stats() # todo Mar 2
|
| 99 |
+
# latent_mean_dpo = torch.stack([latent_mean_chosen, latent_mean_reject], dim=0) # todo Mar 2
|
| 100 |
+
# latent_std_dpo = torch.stack([latent_std_chosen, latent_std_reject], dim=0) # todo Mar 2
|
| 101 |
+
|
| 102 |
+
# construct the trainer
|
| 103 |
+
trainer = Runner(cfg,
|
| 104 |
+
log=log,
|
| 105 |
+
run_path=run_dir,
|
| 106 |
+
for_training=True,
|
| 107 |
+
latent_mean=None,
|
| 108 |
+
latent_std=None,
|
| 109 |
+
dpo_train=True).enter_train() # todo Mar 2
|
| 110 |
+
eval_rng_clone = trainer.rng.graphsafe_get_state()
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# todo load previous checkpoint if needed (including model weights, ema, and optimizer, scheduler)
|
| 114 |
+
if cfg['checkpoint'] is not None:
|
| 115 |
+
curr_iter = trainer.load_checkpoint(cfg['checkpoint'])
|
| 116 |
+
cfg['checkpoint'] = None
|
| 117 |
+
info_if_rank_zero(log, 'Model checkpoint loaded!')
|
| 118 |
+
else:
|
| 119 |
+
# if run_dir exists, load the latest checkpoint
|
| 120 |
+
checkpoint = trainer.get_latest_checkpoint_path()
|
| 121 |
+
if checkpoint is not None:
|
| 122 |
+
curr_iter = trainer.load_checkpoint(checkpoint)
|
| 123 |
+
info_if_rank_zero(log, 'Latest checkpoint loaded!')
|
| 124 |
+
else:
|
| 125 |
+
# load previous network weights if needed # todo may be ok for dpo?
|
| 126 |
+
curr_iter = 0
|
| 127 |
+
if cfg['weights'] is not None:
|
| 128 |
+
info_if_rank_zero(log, 'Loading weights from the disk')
|
| 129 |
+
trainer.load_weights(cfg['weights'])
|
| 130 |
+
cfg['weights'] = None
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# determine max epoch
|
| 134 |
+
total_epoch = math.ceil(total_iterations / len(loader))
|
| 135 |
+
current_epoch = curr_iter // len(loader)
|
| 136 |
+
info_if_rank_zero(log, f'We will approximately use {total_epoch} epochs.')
|
| 137 |
+
|
| 138 |
+
# training loop
|
| 139 |
+
try:
|
| 140 |
+
# Need this to select random bases in different workers
|
| 141 |
+
np.random.seed(np.random.randint(2 ** 30 - 1) + local_rank * 1000)
|
| 142 |
+
while curr_iter < total_iterations:
|
| 143 |
+
# Crucial for randomness!
|
| 144 |
+
sampler.set_epoch(current_epoch)
|
| 145 |
+
current_epoch += 1
|
| 146 |
+
log.debug(f'Current epoch: {current_epoch}')
|
| 147 |
+
|
| 148 |
+
trainer.enter_train()
|
| 149 |
+
trainer.log.data_timer.start()
|
| 150 |
+
for data in loader:
|
| 151 |
+
trainer.train_dpo_pass(data, curr_iter)
|
| 152 |
+
|
| 153 |
+
if (curr_iter + 1) % cfg.val_interval == 0:
|
| 154 |
+
# swap into a eval rng state, i.e., use the same seed for every validation pass
|
| 155 |
+
train_rng_snapshot = trainer.rng.graphsafe_get_state()
|
| 156 |
+
trainer.rng.graphsafe_set_state(eval_rng_clone)
|
| 157 |
+
info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
|
| 158 |
+
for data in val_loader:
|
| 159 |
+
trainer.validation_pass(data, curr_iter)
|
| 160 |
+
distributed.barrier()
|
| 161 |
+
trainer.val_integrator.finalize('val', curr_iter, ignore_timer=True)
|
| 162 |
+
trainer.rng.graphsafe_set_state(train_rng_snapshot)
|
| 163 |
+
# todo Jan 12
|
| 164 |
+
# if (curr_iter + 1) % cfg.eval_interval == 0:
|
| 165 |
+
# save_eval = (curr_iter + 1) % cfg.save_eval_interval == 0
|
| 166 |
+
# train_rng_snapshot = trainer.rng.graphsafe_get_state()
|
| 167 |
+
# trainer.rng.graphsafe_set_state(eval_rng_clone)
|
| 168 |
+
# info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
|
| 169 |
+
# for data in eval_loader:
|
| 170 |
+
# audio_path = trainer.inference_pass(data,
|
| 171 |
+
# curr_iter,
|
| 172 |
+
# val_cfg,
|
| 173 |
+
# save_eval=save_eval)
|
| 174 |
+
# distributed.barrier()
|
| 175 |
+
# trainer.rng.graphsafe_set_state(train_rng_snapshot)
|
| 176 |
+
# trainer.eval(audio_path, curr_iter, 1, val_cfg)
|
| 177 |
+
|
| 178 |
+
curr_iter += 1
|
| 179 |
+
|
| 180 |
+
if curr_iter >= total_iterations:
|
| 181 |
+
break
|
| 182 |
+
except Exception as e:
|
| 183 |
+
log.error(f'Error occurred at iteration {curr_iter}!')
|
| 184 |
+
log.critical(e.message if hasattr(e, 'message') else str(e))
|
| 185 |
+
raise
|
| 186 |
+
finally:
|
| 187 |
+
if not cfg.debug:
|
| 188 |
+
trainer.save_checkpoint(curr_iter)
|
| 189 |
+
trainer.save_weights(curr_iter)
|
| 190 |
+
|
| 191 |
+
# Inference pass
|
| 192 |
+
del trainer
|
| 193 |
+
torch.cuda.empty_cache()
|
| 194 |
+
|
| 195 |
+
# Synthesize EMA
|
| 196 |
+
if local_rank == 0:
|
| 197 |
+
log.info(f'Synthesizing EMA with sigma={cfg.ema.default_output_sigma}')
|
| 198 |
+
ema_sigma = cfg.ema.default_output_sigma
|
| 199 |
+
state_dict = synthesize_ema(cfg, ema_sigma, step=None)
|
| 200 |
+
save_dir = Path(run_dir) / f'{cfg.exp_id}_ema_final.pth'
|
| 201 |
+
torch.save(state_dict, save_dir)
|
| 202 |
+
log.info(f'Synthesized EMA saved to {save_dir}!')
|
| 203 |
+
distributed.barrier()
|
| 204 |
+
|
| 205 |
+
# todo Jan 12
|
| 206 |
+
# log.info(f'Evaluation: {eval_cfg}')
|
| 207 |
+
# sample(eval_cfg)
|
| 208 |
+
|
| 209 |
+
# clean-up
|
| 210 |
+
log.complete()
|
| 211 |
+
distributed.barrier()
|
| 212 |
+
distributed.destroy_process_group()
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
if __name__ == '__main__':
|
| 216 |
+
train()
|