kaiw7 commited on
Commit
2e3a57c
·
verified ·
1 Parent(s): df29d22

Add files using upload-large-folder tool

Browse files
Files changed (12) hide show
  1. .gitignore +146 -0
  2. LICENSE +21 -0
  3. README.md +177 -0
  4. batch_eval_dpo.py +144 -0
  5. demo.py +143 -0
  6. download_hg.py +18 -0
  7. pyproject.toml +55 -0
  8. reward_models/ib_at_sync.py +238 -0
  9. save_ema.py +31 -0
  10. test_try.py +18 -0
  11. train.py +211 -0
  12. train_dpo-Copy1.py +216 -0
.gitignore ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ run_*.sh
2
+ log/
3
+ saves
4
+ saves/
5
+ weights/
6
+ weights
7
+ output/
8
+ output
9
+ pretrained/
10
+ workspace
11
+ workspace/
12
+ ext_weights/
13
+ ext_weights
14
+ .checkpoints/
15
+ .vscode/
16
+ training/example_output/
17
+
18
+ # Byte-compiled / optimized / DLL files
19
+ __pycache__/
20
+ *.py[cod]
21
+ *$py.class
22
+
23
+ # C extensions
24
+ *.so
25
+
26
+ # Distribution / packaging
27
+ .Python
28
+ build/
29
+ develop-eggs/
30
+ dist/
31
+ downloads/
32
+ eggs/
33
+ .eggs/
34
+ lib/
35
+ lib64/
36
+ parts/
37
+ sdist/
38
+ var/
39
+ wheels/
40
+ pip-wheel-metadata/
41
+ share/python-wheels/
42
+ *.egg-info/
43
+ .installed.cfg
44
+ *.egg
45
+ MANIFEST
46
+
47
+ # PyInstaller
48
+ # Usually these files are written by a python script from a template
49
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
50
+ *.manifest
51
+ *.spec
52
+
53
+ # Installer logs
54
+ pip-log.txt
55
+ pip-delete-this-directory.txt
56
+
57
+ # Unit test / coverage reports
58
+ htmlcov/
59
+ .tox/
60
+ .nox/
61
+ .coverage
62
+ .coverage.*
63
+ .cache
64
+ nosetests.xml
65
+ coverage.xml
66
+ *.cover
67
+ *.py,cover
68
+ .hypothesis/
69
+ .pytest_cache/
70
+
71
+ # Translations
72
+ *.mo
73
+ *.pot
74
+
75
+ # Django stuff:
76
+ *.log
77
+ local_settings.py
78
+ db.sqlite3
79
+ db.sqlite3-journal
80
+
81
+ # Flask stuff:
82
+ instance/
83
+ .webassets-cache
84
+
85
+ # Scrapy stuff:
86
+ .scrapy
87
+
88
+ # Sphinx documentation
89
+ docs/_build/
90
+
91
+ # PyBuilder
92
+ target/
93
+
94
+ # Jupyter Notebook
95
+ .ipynb_checkpoints
96
+
97
+ # IPython
98
+ profile_default/
99
+ ipython_config.py
100
+
101
+ # pyenv
102
+ .python-version
103
+
104
+ # pipenv
105
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
106
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
107
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
108
+ # install all needed dependencies.
109
+ #Pipfile.lock
110
+
111
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
112
+ __pypackages__/
113
+
114
+ # Celery stuff
115
+ celerybeat-schedule
116
+ celerybeat.pid
117
+
118
+ # SageMath parsed files
119
+ *.sage.py
120
+
121
+ # Environments
122
+ .env
123
+ .venv
124
+ env/
125
+ venv/
126
+ ENV/
127
+ env.bak/
128
+ venv.bak/
129
+
130
+ # Spyder project settings
131
+ .spyderproject
132
+ .spyproject
133
+
134
+ # Rope project settings
135
+ .ropeproject
136
+
137
+ # mkdocs documentation
138
+ /site
139
+
140
+ # mypy
141
+ .mypy_cache/
142
+ .dmypy.json
143
+ dmypy.json
144
+
145
+ # Pyre type checker
146
+ .pyre/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Ho Kei Cheng
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <p align="center">
3
+ <h2>MMAudio</h2>
4
+ <a href="https://arxiv.org/abs/2412.15322">Paper</a> | <a href="https://hkchengrex.github.io/MMAudio">Webpage</a> | <a href="https://huggingface.co/hkchengrex/MMAudio/tree/main">Models</a> | <a href="https://huggingface.co/spaces/hkchengrex/MMAudio"> Huggingface Demo</a> | <a href="https://colab.research.google.com/drive/1TAaXCY2-kPk4xE4PwKB3EqFbSnkUuzZ8?usp=sharing">Colab Demo</a> | <a href="https://replicate.com/zsxkib/mmaudio">Replicate Demo</a>
5
+ </p>
6
+ </div>
7
+
8
+ ## [Taming Multimodal Joint Training for High-Quality Video-to-Audio Synthesis](https://hkchengrex.github.io/MMAudio)
9
+
10
+ [Ho Kei Cheng](https://hkchengrex.github.io/), [Masato Ishii](https://scholar.google.co.jp/citations?user=RRIO1CcAAAAJ), [Akio Hayakawa](https://scholar.google.com/citations?user=sXAjHFIAAAAJ), [Takashi Shibuya](https://scholar.google.com/citations?user=XCRO260AAAAJ), [Alexander Schwing](https://www.alexander-schwing.de/), [Yuki Mitsufuji](https://www.yukimitsufuji.com/)
11
+
12
+ University of Illinois Urbana-Champaign, Sony AI, and Sony Group Corporation
13
+
14
+ ## Highlight
15
+
16
+ MMAudio generates synchronized audio given video and/or text inputs.
17
+ Our key innovation is multimodal joint training which allows training on a wide range of audio-visual and audio-text datasets.
18
+ Moreover, a synchronization module aligns the generated audio with the video frames.
19
+
20
+ ## Results
21
+
22
+ (All audio from our algorithm MMAudio)
23
+
24
+ Videos from Sora:
25
+
26
+ https://github.com/user-attachments/assets/82afd192-0cee-48a1-86ca-bd39b8c8f330
27
+
28
+ Videos from Veo 2:
29
+
30
+ https://github.com/user-attachments/assets/8a11419e-fee2-46e0-9e67-dfb03c48d00e
31
+
32
+ Videos from MovieGen/Hunyuan Video/VGGSound:
33
+
34
+ https://github.com/user-attachments/assets/29230d4e-21c1-4cf8-a221-c28f2af6d0ca
35
+
36
+ For more results, visit https://hkchengrex.com/MMAudio/video_main.html.
37
+
38
+
39
+ ## Installation
40
+
41
+ We have only tested this on Ubuntu.
42
+
43
+ ### Prerequisites
44
+
45
+ We recommend using a [miniforge](https://github.com/conda-forge/miniforge) environment.
46
+
47
+ - Python 3.9+
48
+ - PyTorch **2.5.1+** and corresponding torchvision/torchaudio (pick your CUDA version https://pytorch.org/, pip install recommended)
49
+ <!-- - ffmpeg<7 ([this is required by torchaudio](https://pytorch.org/audio/master/installation.html#optional-dependencies), you can install it in a miniforge environment with `conda install -c conda-forge 'ffmpeg<7'`) -->
50
+
51
+ **1. Install prerequisite if not yet met:**
52
+
53
+ ```bash
54
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 --upgrade
55
+ ```
56
+
57
+ (Or any other CUDA versions that your GPUs/driver support)
58
+
59
+ <!-- ```
60
+ conda install -c conda-forge 'ffmpeg<7'
61
+ ```
62
+ (Optional, if you use miniforge and don't already have the appropriate ffmpeg) -->
63
+
64
+ **2. Clone our repository:**
65
+
66
+ ```bash
67
+ git clone https://github.com/hkchengrex/MMAudio.git
68
+ ```
69
+
70
+ **3. Install with pip (install pytorch first before attempting this!):**
71
+
72
+ ```bash
73
+ cd MMAudio
74
+ pip install -e .
75
+ ```
76
+
77
+ (If you encounter the `File "setup.py" not found` error, upgrade your pip with `pip install --upgrade pip`)
78
+
79
+
80
+ **Pretrained models:**
81
+
82
+ The models will be downloaded automatically when you run the demo script. MD5 checksums are provided in `mmaudio/utils/download_utils.py`.
83
+ The models are also available at https://huggingface.co/hkchengrex/MMAudio/tree/main
84
+ See [MODELS.md](docs/MODELS.md) for more details.
85
+
86
+ ## Demo
87
+
88
+ By default, these scripts use the `large_44k_v2` model.
89
+ In our experiments, inference only takes around 6GB of GPU memory (in 16-bit mode) which should fit in most modern GPUs.
90
+
91
+ ### Command-line interface
92
+
93
+ With `demo.py`
94
+
95
+ ```bash
96
+ python demo.py --duration=8 --video=<path to video> --prompt "your prompt"
97
+ ```
98
+
99
+ The output (audio in `.flac` format, and video in `.mp4` format) will be saved in `./output`.
100
+ See the file for more options.
101
+ Simply omit the `--video` option for text-to-audio synthesis.
102
+ The default output (and training) duration is 8 seconds. Longer/shorter durations could also work, but a large deviation from the training duration may result in a lower quality.
103
+
104
+ ### Gradio interface
105
+
106
+ Supports video-to-audio and text-to-audio synthesis.
107
+ You can also try experimental image-to-audio synthesis which duplicates the input image to a video for processing. This might be interesting to some but it is not something MMAudio has been trained for.
108
+ Use [port forwarding](https://unix.stackexchange.com/questions/115897/whats-ssh-port-forwarding-and-whats-the-difference-between-ssh-local-and-remot) (e.g., `ssh -L 7860:localhost:7860 server`) if necessary. The default port is `7860` which you can specify with `--port`.
109
+
110
+ ```bash
111
+ python gradio_demo.py
112
+ ```
113
+
114
+ ### FAQ
115
+
116
+ 1. Video processing
117
+ - Processing higher-resolution videos takes longer due to encoding and decoding (which can take >95% of the processing time!), but it does not improve the quality of results.
118
+ - The CLIP encoder resizes input frames to 384×384 pixels.
119
+ - Synchformer resizes the shorter edge to 224 pixels and applies a center crop, focusing only on the central square of each frame.
120
+ 2. Frame rates
121
+ - The CLIP model operates at 8 FPS, while Synchformer works at 25 FPS.
122
+ - Frame rate conversion happens on-the-fly via the video reader.
123
+ - For input videos with a frame rate below 25 FPS, frames will be duplicated to match the required rate.
124
+ 3. Failure cases
125
+ As with most models of this type, failures can occur, and the reasons are not always clear. Below are some known failure modes. If you notice a failure mode or believe there’s a bug, feel free to open an issue in the repository.
126
+ 4. Performance variations
127
+ We notice that there can be subtle performance variations across different hardware and software environments. Some of the reasons include using/not using `torch.compile`, the video reader library/backend, inference precision, batch sizes, random seeds, etc. We (will) provide pre-computed results on standard benchmarks for reference. Results obtained from this codebase should be similar but might not be exactly the same.
128
+
129
+ ### Known limitations
130
+
131
+ 1. The model sometimes generates unintelligible human speech-like sounds
132
+ 2. The model sometimes generates background music (without explicit training, it would not be high quality)
133
+ 3. The model struggles with unfamiliar concepts, e.g., it can generate "gunfires" but not "RPG firing".
134
+
135
+ We believe all of these three limitations can be addressed with more high-quality training data.
136
+
137
+ ## Training
138
+
139
+ See [TRAINING.md](docs/TRAINING.md).
140
+
141
+ ## Evaluation
142
+
143
+ See [EVAL.md](docs/EVAL.md).
144
+
145
+ ## Training Datasets
146
+
147
+ MMAudio was trained on several datasets, including [AudioSet](https://research.google.com/audioset/), [Freesound](https://github.com/LAION-AI/audio-dataset/blob/main/laion-audio-630k/README.md), [VGGSound](https://www.robots.ox.ac.uk/~vgg/data/vggsound/), [AudioCaps](https://audiocaps.github.io/), and [WavCaps](https://github.com/XinhaoMei/WavCaps). These datasets are subject to specific licenses, which can be accessed on their respective websites. We do not guarantee that the pre-trained models are suitable for commercial use. Please use them at your own risk.
148
+
149
+ ## Update Logs
150
+
151
+ - 2024-12-23: Added training and batch evaluation scripts.
152
+ - 2024-12-14: Removed the `ffmpeg<7` requirement for the demos by replacing `torio.io.StreamingMediaDecoder` with `pyav` for reading frames. The read frames are also cached, so we are not reading the same frames again during reconstruction. This should speed things up and make installation less of a hassle.
153
+ - 2024-12-13: Improved for-loop processing in CLIP/Sync feature extraction by introducing a batch size multiplier. We can approximately use 40x batch size for CLIP/Sync without using more memory, thereby speeding up processing. Removed VAE encoder during inference -- we don't need it.
154
+ - 2024-12-11: Replaced `torio.io.StreamingMediaDecoder` with `pyav` for reading framerate when reconstructing the input video. `torio.io.StreamingMediaDecoder` does not work reliably in huggingface ZeroGPU's environment, and I suspect that it might not work in some other environments as well.
155
+
156
+ ## Citation
157
+
158
+ ```bibtex
159
+ @inproceedings{cheng2024taming,
160
+ title={Taming Multimodal Joint Training for High-Quality Video-to-Audio Synthesis},
161
+ author={Cheng, Ho Kei and Ishii, Masato and Hayakawa, Akio and Shibuya, Takashi and Schwing, Alexander and Mitsufuji, Yuki},
162
+ booktitle={arXiv},
163
+ year={2024}
164
+ }
165
+ ```
166
+
167
+ ## Relevant Repositories
168
+
169
+ - [av-benchmark](https://github.com/hkchengrex/av-benchmark) for benchmarking results.
170
+
171
+ ## Acknowledgement
172
+
173
+ Many thanks to:
174
+ - [Make-An-Audio 2](https://github.com/bytedance/Make-An-Audio-2) for the 16kHz BigVGAN pretrained model and the VAE architecture
175
+ - [BigVGAN](https://github.com/NVIDIA/BigVGAN)
176
+ - [Synchformer](https://github.com/v-iashin/Synchformer)
177
+ - [EDM2](https://github.com/NVlabs/edm2) for the magnitude-preserving network architecture
batch_eval_dpo.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from pathlib import Path
4
+
5
+ import hydra
6
+ import torch
7
+ import torch.distributed as distributed
8
+ import torchaudio
9
+ from hydra.core.hydra_config import HydraConfig
10
+ from omegaconf import DictConfig
11
+ from tqdm import tqdm
12
+
13
+ from mmaudio.data.data_setup import setup_eval_dataset
14
+ from mmaudio.eval_utils import ModelConfig, all_model_cfg, generate, make_video_new, load_full_video_frames, make_video, generate_dpo
15
+ from mmaudio.model.flow_matching import FlowMatching
16
+ from mmaudio.model.networks_new import MMAudio, get_my_mmaudio
17
+ from mmaudio.model.utils.features_utils import FeaturesUtils
18
+
19
+ torch.backends.cuda.matmul.allow_tf32 = True
20
+ torch.backends.cudnn.allow_tf32 = True
21
+
22
+ local_rank = int(os.environ['LOCAL_RANK'])
23
+ world_size = int(os.environ['WORLD_SIZE'])
24
+ log = logging.getLogger()
25
+
26
+
27
@torch.inference_mode()
@hydra.main(version_base='1.3.2', config_path='config', config_name='eval_for_dpo_config.yaml')
def main(cfg: DictConfig):
    """Generate multiple audio candidates per video for DPO data collection.

    For every batch in the evaluation dataset, samples
    ``cfg.num_samples_per_video`` audio clips per video, then writes each
    sample as a ``.flac`` plus a muxed ``.mp4`` under
    ``<run_dir>/<dataset>[-<output_name>]/generated_videos/<name>/<k>/``.
    """
    device = 'cuda'
    torch.cuda.set_device(local_rank)

    if cfg.model not in all_model_cfg:
        raise ValueError(f'Unknown model variant: {cfg.model}')
    model: ModelConfig = all_model_cfg[cfg.model]
    seq_cfg = model.seq_cfg

    run_dir = Path(HydraConfig.get().run.dir)
    if cfg.output_name is None:
        output_dir = run_dir / cfg.dataset
    else:
        output_dir = run_dir / f'{cfg.dataset}-{cfg.output_name}'
    output_dir.mkdir(parents=True, exist_ok=True)

    # load a pretrained model
    seq_cfg.duration = cfg.duration_s
    net: MMAudio = get_my_mmaudio(cfg.model).to(device).eval()

    # fall back to an explicitly-configured checkpoint when the model registry
    # carries no path for this variant
    if model.model_path is None:
        if model.model_name == 'small_44k':
            model.model_path = Path(cfg.small_44k_pretrained_ckpt_path)
        else:
            raise ValueError('Given Model Is Not Supported !')

    net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True))
    log.info(f'Loaded weights from {model.model_path}')
    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)
    log.info(f'Latent seq len: {seq_cfg.latent_seq_len}')
    log.info(f'Clip seq len: {seq_cfg.clip_seq_len}')
    log.info(f'Sync seq len: {seq_cfg.sync_seq_len}')

    # misc setup: seeded generator so sampling is reproducible per run
    rng = torch.Generator(device=device)
    rng.manual_seed(cfg.seed)
    fm = FlowMatching(cfg.sampling.min_sigma,
                      inference_mode=cfg.sampling.method,
                      num_steps=cfg.sampling.num_steps)

    feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
                                  synchformer_ckpt=model.synchformer_ckpt,
                                  enable_conditions=True,
                                  mode=model.mode,
                                  bigvgan_vocoder_ckpt=model.bigvgan_16k_path,
                                  need_vae_encoder=False)
    feature_utils = feature_utils.to(device).eval()

    if cfg.compile:
        net.preprocess_conditions = torch.compile(net.preprocess_conditions)
        net.predict_flow = torch.compile(net.predict_flow)
        feature_utils.compile()

    dataset, loader = setup_eval_dataset(cfg.dataset, cfg)

    with torch.amp.autocast(enabled=cfg.amp, dtype=torch.bfloat16, device_type=device):
        for batch in tqdm(loader):
            audios_bank = generate_dpo(batch.get('clip_video', None),
                                       batch.get('sync_video', None),
                                       batch.get('caption', None),
                                       feature_utils=feature_utils,
                                       net=net,
                                       fm=fm,
                                       rng=rng,
                                       cfg_strength=cfg.cfg_strength,
                                       clip_batch_size_multiplier=64,
                                       sync_batch_size_multiplier=64,
                                       num_samples_per_video=cfg.num_samples_per_video)

            # decode each video's frames once up front and reuse them for every
            # sample id (avoids re-reading the same file num_samples times)
            video_paths = batch['video_path']
            video_info_dict = {
                video_path: load_full_video_frames(video_path, cfg.duration_s)
                for video_path in video_paths
            }
            names = batch['name']  # invariant across sample ids; hoisted out of the loop

            # same source video is paired with each generated audio sample
            for sample_id, audios in enumerate(audios_bank):
                audios = audios.float().cpu()

                for audio, name, video_path in zip(audios, names, video_paths):
                    output_dir_save = output_dir / 'generated_videos' / f'{name}' / f'{sample_id + 1}'
                    output_dir_save.mkdir(parents=True, exist_ok=True)

                    torchaudio.save(output_dir_save / f'{name}.flac', audio, seq_cfg.sampling_rate)
                    video_save_path = output_dir_save / f'{name}.mp4'
                    make_video(video_info_dict[video_path], video_save_path,
                               audio, sampling_rate=seq_cfg.sampling_rate)
128
+
129
+
130
def distributed_setup():
    """Initialize the NCCL process group and return ``(rank, world_size)``.

    Note: ``distributed.get_rank()`` returns the *global* rank, which equals
    the local rank only on single-node runs.
    """
    distributed.init_process_group(backend="nccl")
    # renamed from local_rank/world_size: the old names shadowed the
    # module-level globals and mislabeled the global rank as a local rank
    rank = distributed.get_rank()
    world_size_ = distributed.get_world_size()
    log.info(f'Initialized: rank={rank}, world_size={world_size_}')
    return rank, world_size_
136
+
137
+
138
# Script entry point: bring up the process group, run evaluation, tear down.
if __name__ == '__main__':
    distributed_setup()
    main()
    # clean-up
    distributed.destroy_process_group()
demo.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+
5
+ import torch
6
+ import torchaudio
7
+
8
+ from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate, load_video, make_video,
9
+ setup_eval_logging)
10
+ from mmaudio.model.flow_matching import FlowMatching
11
+ #from mmaudio.model.networks import MMAudio, get_my_mmaudio
12
+ from mmaudio.model.networks_new import MMAudio, get_my_mmaudio
13
+
14
+ from mmaudio.model.utils.features_utils import FeaturesUtils
15
+
16
+ torch.backends.cuda.matmul.allow_tf32 = True
17
+ torch.backends.cudnn.allow_tf32 = True
18
+
19
+ log = logging.getLogger()
20
+
21
+
22
@torch.inference_mode()
def main():
    """CLI entry point: generate audio for a video (V2A) or from text alone (T2A).

    Saves the generated audio as ``.flac`` under ``--output``; when a video is
    given (and compositing is not skipped), also muxes it into an ``.mp4``.
    """
    setup_eval_logging()

    parser = ArgumentParser()
    parser.add_argument('--variant',
                        type=str,
                        default='small_16k',
                        help='small_16k, small_44k, medium_44k, large_44k, large_44k_v2')
    parser.add_argument('--video', type=Path, help='Path to the video file')
    parser.add_argument('--prompt', type=str, help='Input prompt', default='')
    parser.add_argument('--negative_prompt', type=str, help='Negative prompt', default='')
    parser.add_argument('--duration', type=float, default=8.0)
    parser.add_argument('--cfg_strength', type=float, default=4.5)
    parser.add_argument('--num_steps', type=int, default=25)

    parser.add_argument('--mask_away_clip', action='store_true')

    parser.add_argument('--output', type=Path, help='Output directory', default='./output/demo_lumina_v2a')
    parser.add_argument('--seed', type=int, help='Random seed', default=42)
    parser.add_argument('--skip_video_composite', action='store_true')
    parser.add_argument('--full_precision', action='store_true')

    args = parser.parse_args()

    if args.variant not in all_model_cfg:
        raise ValueError(f'Unknown model variant: {args.variant}')
    model: ModelConfig = all_model_cfg[args.variant]
    seq_cfg = model.seq_cfg

    if args.video:
        video_path: Path = Path(args.video).expanduser()
    else:
        video_path = None
    prompt: str = args.prompt
    negative_prompt: str = args.negative_prompt
    output_dir: Path = args.output.expanduser()  # fixed: annotation said str, value is a Path
    seed: int = args.seed
    num_steps: int = args.num_steps
    duration: float = args.duration
    cfg_strength: float = args.cfg_strength
    skip_video_composite: bool = args.skip_video_composite
    mask_away_clip: bool = args.mask_away_clip

    # pick the best available device; warn only when neither accelerator is up
    if torch.cuda.is_available():
        device = 'cuda'
    elif torch.backends.mps.is_available():
        device = 'mps'
    else:
        device = 'cpu'
        log.warning('CUDA/MPS are not available, running on CPU')
    dtype = torch.float32 if args.full_precision else torch.bfloat16

    output_dir.mkdir(parents=True, exist_ok=True)

    # load a pretrained model
    net: MMAudio = get_my_mmaudio(model.model_name).to(device, dtype).eval()
    net.load_weights(torch.load(model.model_path, map_location=device, weights_only=True))
    log.info(f'Loaded weights from {model.model_path}')

    # misc setup: seeded generator and flow-matching sampler
    rng = torch.Generator(device=device)
    rng.manual_seed(seed)
    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=num_steps)

    feature_utils = FeaturesUtils(tod_vae_ckpt=model.vae_path,
                                  synchformer_ckpt=model.synchformer_ckpt,
                                  enable_conditions=True,
                                  mode=model.mode,
                                  bigvgan_vocoder_ckpt=model.bigvgan_16k_path,
                                  need_vae_encoder=False)
    feature_utils = feature_utils.to(device, dtype).eval()

    if video_path is not None:
        log.info(f'Using video {video_path}')
        video_info = load_video(video_path, duration)
        clip_frames = video_info.clip_frames
        sync_frames = video_info.sync_frames
        # the loaded clip may be shorter than requested; follow its real length
        duration = video_info.duration_sec
        if mask_away_clip:
            clip_frames = None
        else:
            clip_frames = clip_frames.unsqueeze(0)
        sync_frames = sync_frames.unsqueeze(0)
    else:
        log.info('No video provided -- text-to-audio mode')
        clip_frames = sync_frames = None

    seq_cfg.duration = duration
    net.update_seq_lengths(seq_cfg.latent_seq_len, seq_cfg.clip_seq_len, seq_cfg.sync_seq_len)

    log.info(f'Prompt: {prompt}')
    log.info(f'Negative prompt: {negative_prompt}')

    audios = generate(clip_frames,
                      sync_frames, [prompt],
                      negative_text=[negative_prompt],
                      feature_utils=feature_utils,
                      net=net,
                      fm=fm,
                      rng=rng,
                      cfg_strength=cfg_strength)
    audio = audios.float().cpu()[0]
    if video_path is not None:
        save_path = output_dir / f'{video_path.stem}.flac'
    else:
        safe_filename = prompt.replace(' ', '_').replace('/', '_').replace('.', '')
        save_path = output_dir / f'{safe_filename}.flac'
    torchaudio.save(save_path, audio, seq_cfg.sampling_rate)

    log.info(f'Audio saved to {save_path}')
    if video_path is not None and not skip_video_composite:
        video_save_path = output_dir / f'{video_path.stem}.mp4'
        make_video(video_info, video_save_path, audio, sampling_rate=seq_cfg.sampling_rate)
        # fixed: previous message logged `output_dir / video_save_path`, which
        # prepends the output directory a second time
        log.info(f'Video saved to {video_save_path}')

    if device == 'cuda':
        # fixed: only query CUDA memory stats when actually running on CUDA;
        # the call can fail or be meaningless on CPU/MPS
        log.info('Memory usage: %.2f GB', torch.cuda.max_memory_allocated() / (2**30))
140
+
141
+
142
# Script entry point.
if __name__ == '__main__':
    main()
download_hg.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Scratch script for fetching/inspecting pretrained weights from model hubs."""
import open_clip
from open_clip import create_model_from_pretrained

# Earlier experiments (kept for reference):
# clip_model = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14-384', return_transform=False)
# print(clip_model)
#
# tokenizer = open_clip.get_tokenizer('ViT-H-14-378-quickgelu')
# print(tokenizer)
#
# import gdown
# url = "https://drive.google.com/uc?id=1vxUJ6ILoBkBtj7Ji9EHJCaJvR35np6sX"
# output = "/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/dataset/AudioCaps/test.zip"
# gdown.download(url, output)

from mmaudio.ext.bigvgan_v2.bigvgan import BigVGAN as BigVGANv2

# Download (if not cached) and print the 44 kHz BigVGAN v2 vocoder.
vocoder = BigVGANv2.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
print(vocoder)
pyproject.toml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [tool.hatch.metadata]
6
+ allow-direct-references = true
7
+
8
+ [tool.yapf]
9
+ based_on_style = "pep8"
10
+ indent_width = 4
11
+ column_limit = 100
12
+
13
+ [tool.isort]
14
+ line_length = 100
15
+
16
+ [project]
17
+ name = "mmaudio"
18
+ version = "1.0.0"
19
+ authors = [{ name = "Rex Cheng", email = "hkchengrex@gmail.com" }]
20
+ description = "MMAudio generates synchronized audio given video and/or text inputs"
21
+ readme = "README.md"
22
+ requires-python = ">=3.9"
23
+ classifiers = [
24
+ "Programming Language :: Python :: 3",
25
+ "Operating System :: OS Independent",
26
+ ]
27
+ dependencies = [
28
+ 'torch >= 2.5.1',
29
+ 'huggingface_hub >= 0.26',
30
+ 'cython',
31
+ 'gitpython >= 3.1',
32
+ 'tensorboard >= 2.11',
33
+ 'numpy >= 1.21, <2.1',
34
+ 'Pillow >= 9.5',
35
+ 'opencv-python >= 4.8',
36
+ 'scipy >= 1.7',
37
+ 'tqdm >= 4.66.1',
38
+ 'gradio >= 3.34',
39
+ 'einops >= 0.6',
40
+ 'hydra-core >= 1.3.2',
41
+ 'requests',
42
+ 'torchdiffeq >= 0.2.5',
43
+ 'librosa >= 0.8.1',
44
+ 'nitrous-ema',
45
+ 'hydra_colorlog',
46
+ 'tensordict >= 0.6.1',
47
+ 'colorlog',
48
+ 'open_clip_torch >= 2.29.0',
49
+ 'av >= 14.0.1',
50
+ 'timm >= 1.0.12',
51
+ 'python-dotenv',
52
+ ]
53
+
54
+ [tool.hatch.build.targets.wheel]
55
+ packages = ["mmaudio"]
reward_models/ib_at_sync.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from pathlib import Path
4
+
5
+ import torch
6
+ from colorlog import ColoredFormatter
7
+ from einops import rearrange
8
+
9
+ from ib_sync_rewards.imagebind.models import imagebind_model
10
+ from ib_sync_rewards.imagebind.models.imagebind_model import ModalityType
11
+
12
+ from ib_sync_rewards.imagebind.models.multimodal_preprocessors import SimpleTokenizer
13
+ from torch.utils.data import DataLoader
14
+ from tqdm import tqdm
15
+
16
+ from ib_sync_rewards.args import get_eval_parser
17
+ from ib_sync_rewards.data.video_dataset import AudioDataset, pad_or_truncate, error_avoidance_collate
18
+ from ib_sync_rewards.synchformer.synchformer import Synchformer, make_class_grid
19
+
20
+ import torchaudio
21
+ import json
22
+ import pandas as pd
23
+
24
+ _syncformer_ckpt_path = Path(__file__).parent / 'ib_sync_rewards' / 'weights' / 'synchformer_state_dict.pth'
25
+ log = logging.getLogger()
26
+ device = 'cuda'
27
+
28
+ LOGFORMAT = "[%(log_color)s%(levelname)-8s%(reset)s]: %(log_color)s%(message)s%(reset)s"
29
+
30
+
31
def setup_eval_logging(log_level: int = logging.INFO):
    """Attach a colorized stream handler to the root logger.

    Args:
        log_level: threshold applied to both the root logger and the handler.
    """
    root = logging.getLogger()
    root.setLevel(log_level)
    # fixed: guard against stacking duplicate handlers when called more than
    # once (each extra handler duplicates every emitted log line)
    if any(isinstance(h, logging.StreamHandler) for h in root.handlers):
        return
    stream = logging.StreamHandler()
    stream.setLevel(log_level)
    stream.setFormatter(ColoredFormatter(LOGFORMAT))
    root.addHandler(stream)
40
+
41
+
42
+ setup_eval_logging()
43
+
44
+
45
def encode_video_with_sync(synchformer: Synchformer, x: torch.Tensor) -> torch.Tensor:
    """Extract per-segment Synchformer visual features.

    Expects ``x`` of shape (B, T, 3, 224, 224). The time axis is split into
    16-frame windows with a stride of 8, each window is encoded independently,
    and the result comes back as (B, S, T', D) with S windows per video.
    """
    b, t, c, h, w = x.shape
    assert c == 3 and h == 224 and w == 224

    # slide a 16-frame window over time with 50% overlap (stride 8)
    segment_size = 16
    step_size = 8
    num_segments = (t - segment_size) // step_size + 1
    windows = [
        x[:, start:start + segment_size]
        for start in range(0, num_segments * step_size, step_size)
    ]
    x = torch.stack(windows, dim=1)  # (B, S, 16, C, H, W)

    # fold the segment axis into the batch so each window is encoded separately
    x = rearrange(x, 'b s t c h w -> (b s) 1 t c h w')
    x = synchformer.extract_vfeats(x)
    return rearrange(x, '(b s) 1 t d -> b s t d', b=b)
64
+
65
+
66
def encode_video_with_imagebind(imagebind: imagebind_model, x: torch.Tensor) -> torch.Tensor:
    """Compute ImageBind vision embeddings for overlapping two-frame clips.

    ``x`` has shape B * NUM_CROPS * T * C * H * W; every consecutive frame
    pair becomes one clip, and all clips are embedded in a single forward pass.
    """
    b, num_crops, t, c, h, w = x.shape

    # build the (t - 1) overlapping two-frame clips along the time axis
    pair_clips = [x[:, :, i:i + 2] for i in range(t - 1)]
    clips = torch.cat(pair_clips, dim=1)

    # clips: B * (NUM_CROPS * NUM_CLIPS) * 2 * C * H * W -> channels before time
    clips = rearrange(clips, 'b n t c h w -> b n c t h w')

    emb = imagebind({ModalityType.VISION: clips})
    return emb[ModalityType.VISION]
79
+
80
def encode_audio_with_sync(synchformer: Synchformer, x: torch.Tensor,
                           mel: torchaudio.transforms.MelSpectrogram) -> torch.Tensor:
    """Extract per-segment Synchformer audio features from raw waveforms.

    ``x`` has shape (B, T) in samples. The waveform is split into
    10240-sample windows with 50% overlap, converted to log-mel spectrograms,
    normalized with fixed constants, and encoded segment by segment.
    """
    b, t = x.shape

    # slide a 10240-sample window over the waveform with 50% overlap
    segment_size = 10240
    step_size = segment_size // 2
    num_segments = (t - segment_size) // step_size + 1
    windows = [
        x[:, start:start + segment_size]
        for start in range(0, num_segments * step_size, step_size)
    ]
    x = torch.stack(windows, dim=1)  # (B, S, segment_size)

    # log-mel spectrogram, padded/truncated to 66 time frames
    x = torch.log(mel(x) + 1e-6)
    x = pad_or_truncate(x, 66)

    # fixed normalization constants (hard-coded upstream)
    mean = -4.2677393
    std = 4.5689974
    x = (x - mean) / (2 * std)

    # x: B * S * 128 * 66
    return synchformer.extract_afeats(x.unsqueeze(2))
103
+
104
@torch.inference_mode()
def extract(args):
    """Score generated audio samples against their captions with ImageBind.

    Expects ``args.video_path`` to contain one directory per video, each
    holding numbered sample sub-directories (``1`` .. ``N``) with
    ``<video_id>.mp4``/``<video_id>.flac`` inside. For every sample index it
    computes the ImageBind audio-text cosine similarity, then writes two TSVs
    to ``args.output_dir``: the full score table and a DPO table marking the
    best-scoring sample as 'chosen' and the worst as 'reject'.
    """
    video_path: Path = args.video_path.expanduser()  # root folder of per-video dirs
    json_path: Path = args.json_path  # JSON with captions per video_id
    output_dir: Path = args.output_dir  # where the TSV files are written
    audio_length: float = args.audio_length
    num_workers: int = args.num_workers
    batch_size: int = args.gt_batch_size

    log.info('Extracting features...')

    # Each JSON element appears to be a list whose first item carries
    # 'video_id' and 'caption' — TODO confirm against the file producer.
    with open(json_path, 'r') as f:
        data = json.load(f)

    video_id_caption_dict = {}
    for each_data in data:
        video_id = each_data[0]['video_id']
        caption = each_data[0]['caption']
        video_id_caption_dict[video_id] = caption

    # Enumerate all per-video directories.
    video_names = os.listdir(video_path)
    video_name_paths = [video_path / f for f in video_names]  # ./video_path/video_name

    # Sample indices are taken from the FIRST video directory only and sorted
    # numerically; assumes every video has the same sample sub-dirs — verify.
    samples_per_video = sorted(list(map(int, os.listdir(video_name_paths[0]))))
    samples_per_video = list(map(str, samples_per_video))  # e.g. ['1', '2', ..., '10']

    #samples_per_video = os.listdir(video_name_paths[0]) # todo 1,2,...,10 should be sorted?

    log.info(f'{len(video_name_paths)} videos found.')
    log.info(f'{len(samples_per_video)} samples are found in each video.')

    # Load pre-trained ImageBind (downloads weights if not cached).
    imagebind = imagebind_model.imagebind_huge(pretrained=True).to(device).eval()
    # BPE tokenizer for the text branch.
    tokenizer = SimpleTokenizer(bpe_path='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/reward_models/ib_sync_rewards/imagebind/bpe/bpe_simple_vocab_16e6.txt.gz')

    total_outputs = []
    for sample_idx in samples_per_video:
        log.info(f'Starting to extracting features in sample index: {sample_idx}')
        # <video_dir>/<sample_idx>/<video_id>.mp4 / .flac
        video_paths = [f / sample_idx / f'{os.path.basename(f)}.mp4' for f in video_name_paths]
        audio_paths = [f / sample_idx / f'{os.path.basename(f)}.flac' for f in video_name_paths]

        log.info(f'{len(video_paths)} videos found.')

        dataset = AudioDataset(video_paths, audio_paths, duration_sec=audio_length, video_id_caption=video_id_caption_dict)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            collate_fn=error_avoidance_collate
                            )  # collate_fn drops samples that fail to load

        output_for_each_sample_idx_dict = {}

        # NOTE(review): this loop variable shadows the JSON `data` read above.
        for data in tqdm(loader):
            names = data['name']

            # Audio waveforms preprocessed by the dataset for ImageBind.
            ib_audio = data['ib_audio'].squeeze(1).to(device)

            # Text branch: tokenize each caption and batch the token tensors.
            ib_text = data['label']
            ib_text_tokens = [tokenizer(t).unsqueeze(0).to(device) for t in ib_text]
            ib_text_tokens = torch.cat(ib_text_tokens, dim=0)
            #ib_text_tokens = tokenizer(ib_text).to(device)
            ib_text_features = imagebind({ModalityType.TEXT: ib_text_tokens})[ModalityType.TEXT].cpu().detach()

            # Audio branch embeddings.
            ib_audio_features = imagebind({ModalityType.AUDIO: ib_audio})[ModalityType.AUDIO].cpu().detach()

            # ImageBind audio-text (AT) score: cosine similarity per sample.
            ib_at_scores = torch.cosine_similarity(ib_text_features, ib_audio_features, dim=-1)

            for i, n in enumerate(names):
                each_output = {
                    'id': n,
                    'label': video_id_caption_dict[n],
                    'ib_at_score': ib_at_scores[i].item()
                }
                output_for_each_sample_idx_dict[n] = each_output

        total_outputs.append(output_for_each_sample_idx_dict)

    # Combine per-sample-index results into per-video rows.
    log.info('Combining and Saving Metrics...')
    saved_ib_at_output_full = []  # one row per video: score for every sample idx
    saved_ib_at_output_dpo = []  # one row per video: chosen/reject sample idx

    # Iterates over videos present in the first sample split; assumes all
    # splits contain the same video ids (KeyError otherwise) — verify.
    video_id_list = total_outputs[0].keys()
    for video_id in tqdm(video_id_list):
        outputs_ib_at_metrics = {
            'id': video_id,
            'label': video_id_caption_dict[video_id]
        }

        ib_at_scores_for_each_video = []

        for idx, each_sample_idx_dict in enumerate(total_outputs):
            # Column names are 1-based sample indices ('1', '2', ...).
            ib_at_scores_for_each_video.append(each_sample_idx_dict[video_id]['ib_at_score'])
            outputs_ib_at_metrics[str(idx+1)] = each_sample_idx_dict[video_id]['ib_at_score']

        # DPO pair: highest-scoring sample is 'chosen', lowest is 'reject'
        # (ties resolve to the first occurrence via list.index).
        outputs_ib_at_dpo = {
            'id': video_id,
            'label': video_id_caption_dict[video_id],
            'chosen': ib_at_scores_for_each_video.index(max(ib_at_scores_for_each_video)) + 1,
            'reject': ib_at_scores_for_each_video.index(min(ib_at_scores_for_each_video)) + 1
        }

        saved_ib_at_output_full.append(outputs_ib_at_metrics)

        saved_ib_at_output_dpo.append(outputs_ib_at_dpo)

    # Persist both tables as tab-separated files.
    output_ib_at_full_df = pd.DataFrame(saved_ib_at_output_full)
    output_ib_at_full_df.to_csv(os.path.join(output_dir, 'imagebind_at_score.tsv'), sep='\t', index=False)

    output_ib_at_dpo_df = pd.DataFrame(saved_ib_at_output_dpo)
    output_ib_at_dpo_df.to_csv(os.path.join(output_dir, 'dpo_imagebind_at.tsv'), sep='\t', index=False)
232
if __name__ == '__main__':
    # Basic config is a no-op if setup_eval_logging already attached a handler.
    logging.basicConfig(level=logging.INFO)

    arg_parser = get_eval_parser()
    arg_parser.add_argument('--video_path', type=Path, required=True,
                            help='Path to the video files')
    extract(arg_parser.parse_args())
save_ema.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from typing import Optional

from nitrous_ema import PostHocEMA

from mmaudio.model.networks_new import get_my_mmaudio


def synthesize_ema(sigma: float, step: Optional[int]):
    """Reconstruct a post-hoc EMA state dict for the small_44k model.

    Args:
        sigma: target sigma_rel for the synthesized EMA profile.
        step: checkpoint step to synthesize at, or None for the latest.

    Returns:
        The synthesized EMA model's state dict (on CPU).
    """
    model = get_my_mmaudio('small_44k')
    emas = PostHocEMA(model,
                      sigma_rels=[0.05, 0.1],
                      update_every=1,
                      checkpoint_every_num_steps=5000,
                      checkpoint_folder='/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/vgg_only_small_44k_new_model_feb1/ema_ckpts')

    synthesized = emas.synthesize_ema_model(sigma_rel=sigma, step=step, device='cpu')
    return synthesized.ema_model.state_dict()


# Synthesize EMA

ema_sigma = 0.05
print('Start !!!')
state_dict = synthesize_ema(ema_sigma, step=None)
save_dir = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/kwang/MMAudio/output/vgg_only_small_44k_new_model_feb1/vgg_only_small_44k_new_model_feb1_ema_final.pth'
torch.save(state_dict, save_dir)
print('Finished !!!')
test_try.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import pandas as pd

# '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/zhoutao-240108120126/datasets/audio-visual/vggsound/vggsound.csv' #
csv_path = '/inspire/hdd/ws-f4d69b29-e0a5-44e6-bd92-acf4de9990f0/gaopeng/public/kwang/datasets/vggsound/vggsound-caption.csv'

# Sanity-check the caption CSV: inspect only the first record.
records = pd.read_csv(csv_path, header=None, names=['id', 'sec', 'caption', 'split']).to_dict(orient='records')

for row in records:
    start_sec = int(row['sec'])
    video_id = str(row['id'])
    # this is how our videos are named
    video_name = f'{video_id}_{start_sec:06d}'
    caption = row['caption']
    print(start_sec)
    print(video_id)
    print(caption)
    break
train.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import logging
3
+ import math
4
+ import random
5
+ from datetime import timedelta
6
+ from pathlib import Path
7
+
8
+ import hydra
9
+ import numpy as np
10
+ import torch
11
+ import torch.distributed as distributed
12
+ from hydra import compose
13
+ from hydra.core.hydra_config import HydraConfig
14
+ from omegaconf import DictConfig, open_dict
15
+ from torch.distributed.elastic.multiprocessing.errors import record
16
+
17
+ from mmaudio.data.data_setup import setup_training_datasets, setup_val_datasets
18
+ from mmaudio.model.sequence_config import CONFIG_16K, CONFIG_44K
19
+ from mmaudio.runner import Runner
20
+ from mmaudio.sample import sample
21
+ from mmaudio.utils.dist_utils import info_if_rank_zero, local_rank, world_size
22
+ from mmaudio.utils.logger import TensorboardLogger
23
+ from mmaudio.utils.synthesize_ema import synthesize_ema
24
+
25
+ torch.backends.cuda.matmul.allow_tf32 = True
26
+ torch.backends.cudnn.allow_tf32 = True
27
+
28
+ log = logging.getLogger()
29
+
30
+
31
def distributed_setup():
    """Join the NCCL process group and return (local_rank, world_size)."""
    # Generous timeout: dataset setup on some ranks can take a long time.
    nccl_timeout = timedelta(hours=2)
    distributed.init_process_group(backend="nccl", timeout=nccl_timeout)
    log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}')
    return local_rank, world_size
+
37
@record
@hydra.main(version_base='1.3.2', config_path='config', config_name='train_config.yaml')
def train(cfg: DictConfig):
    """Distributed training entry point, driven by the Hydra config.

    Sets up datasets and the Runner, resumes from a checkpoint when one is
    available, runs the train/validation loop until ``cfg.num_iterations``,
    and finally synthesizes a post-hoc EMA checkpoint on rank 0.
    """
    # initial setup
    torch.cuda.set_device(local_rank)
    torch.backends.cudnn.benchmark = cfg.cudnn_benchmark
    distributed_setup()
    num_gpus = world_size
    run_dir = HydraConfig.get().run.dir

    # compose early such that it does not rely on future hard disk reading
    eval_cfg = compose('eval_config', overrides=[f'exp_id={cfg.exp_id}'])

    # patch data dim: sequence lengths depend on the audio sampling rate variant
    if cfg.model.endswith('16k'):
        seq_cfg = CONFIG_16K
    elif cfg.model.endswith('44k'):
        seq_cfg = CONFIG_44K
    else:
        raise ValueError(f'Unknown model: {cfg.model}')
    with open_dict(cfg):
        cfg.data_dim.latent_seq_len = seq_cfg.latent_seq_len
        cfg.data_dim.clip_seq_len = seq_cfg.clip_seq_len
        cfg.data_dim.sync_seq_len = seq_cfg.sync_seq_len

    # wrap python logger with a tensorboard logger
    # NOTE: this local `log` shadows the module-level logger for the rest of train()
    log = TensorboardLogger(cfg.exp_id,
                            run_dir,
                            logging.getLogger(),
                            is_rank0=(local_rank == 0),
                            enable_email=cfg.enable_email and not cfg.debug)

    info_if_rank_zero(log, f'All configuration: {cfg}')
    info_if_rank_zero(log, f'Number of GPUs detected: {num_gpus}')

    # number of dataloader workers
    info_if_rank_zero(log, f'Number of dataloader workers (per GPU): {cfg.num_workers}')

    # Set seeds to ensure the same initialization on every rank
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)

    # setting up configurations: cfg.batch_size is the GLOBAL batch size
    info_if_rank_zero(log, f'Training configuration: {cfg}')
    cfg.batch_size //= num_gpus
    info_if_rank_zero(log, f'Batch size (per GPU): {cfg.batch_size}')

    # determine time to change max skip
    total_iterations = cfg['num_iterations']

    # setup datasets
    dataset, sampler, loader = setup_training_datasets(cfg)
    info_if_rank_zero(log, f'Number of training samples: {len(dataset)}')
    info_if_rank_zero(log, f'Number of training batches: {len(loader)}')

    val_dataset, val_loader, eval_loader = setup_val_datasets(cfg)
    info_if_rank_zero(log, f'Number of val samples: {len(val_dataset)}')
    val_cfg = cfg.data.ExtractedVGG_val

    # compute and set mean and std used to normalize the latents
    latent_mean, latent_std = dataset.compute_latent_stats()

    # construct the trainer
    trainer = Runner(cfg,
                     log=log,
                     run_path=run_dir,
                     for_training=True,
                     latent_mean=latent_mean,
                     latent_std=latent_std).enter_train()
    # snapshot of the RNG state, restored before every validation pass so that
    # validation uses identical randomness at every interval
    eval_rng_clone = trainer.rng.graphsafe_get_state()

    # load previous checkpoint if needed
    if cfg['checkpoint'] is not None:
        curr_iter = trainer.load_checkpoint(cfg['checkpoint'])
        cfg['checkpoint'] = None
        info_if_rank_zero(log, 'Model checkpoint loaded!')
    else:
        # if run_dir exists, load the latest checkpoint (auto-resume)
        checkpoint = trainer.get_latest_checkpoint_path()
        if checkpoint is not None:
            curr_iter = trainer.load_checkpoint(checkpoint)
            info_if_rank_zero(log, 'Latest checkpoint loaded!')
        else:
            # load previous network weights if needed (weights only, no optimizer state)
            curr_iter = 0
            if cfg['weights'] is not None:
                info_if_rank_zero(log, 'Loading weights from the disk')
                trainer.load_weights(cfg['weights'])
                cfg['weights'] = None

    # determine max epoch
    total_epoch = math.ceil(total_iterations / len(loader))
    current_epoch = curr_iter // len(loader)
    info_if_rank_zero(log, f'We will approximately use {total_epoch} epochs.')

    # training loop
    try:
        # Need this to select random bases in different workers
        np.random.seed(np.random.randint(2**30 - 1) + local_rank * 1000)
        while curr_iter < total_iterations:
            # Crucial for randomness! DistributedSampler shuffles per-epoch.
            sampler.set_epoch(current_epoch)
            current_epoch += 1
            log.debug(f'Current epoch: {current_epoch}')

            trainer.enter_train()
            trainer.log.data_timer.start()
            for data in loader:
                trainer.train_pass(data, curr_iter)

                if (curr_iter + 1) % cfg.val_interval == 0:
                    # swap into a eval rng state, i.e., use the same seed for every validation pass
                    train_rng_snapshot = trainer.rng.graphsafe_get_state()
                    trainer.rng.graphsafe_set_state(eval_rng_clone)
                    info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
                    for data in val_loader:
                        trainer.validation_pass(data, curr_iter)
                    distributed.barrier()
                    trainer.val_integrator.finalize('val', curr_iter, ignore_timer=True)
                    trainer.rng.graphsafe_set_state(train_rng_snapshot)
                ''' # todo Jan 12
                if (curr_iter + 1) % cfg.eval_interval == 0:
                    save_eval = (curr_iter + 1) % cfg.save_eval_interval == 0
                    train_rng_snapshot = trainer.rng.graphsafe_get_state()
                    trainer.rng.graphsafe_set_state(eval_rng_clone)
                    info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
                    for data in eval_loader:
                        audio_path = trainer.inference_pass(data,
                                                           curr_iter,
                                                           val_cfg,
                                                           save_eval=save_eval)
                    distributed.barrier()
                    trainer.rng.graphsafe_set_state(train_rng_snapshot)
                    trainer.eval(audio_path, curr_iter, 1, val_cfg)'''

                curr_iter += 1

                if curr_iter >= total_iterations:
                    break
    except Exception as e:
        log.error(f'Error occurred at iteration {curr_iter}!')
        log.critical(e.message if hasattr(e, 'message') else str(e))
        raise
    finally:
        # always save state on exit so the run can resume
        # NOTE(review): if an exception fires before curr_iter is assigned,
        # this would raise NameError/UnboundLocalError — confirm reachable paths
        if not cfg.debug:
            trainer.save_checkpoint(curr_iter)
            trainer.save_weights(curr_iter)

    # Inference pass: free trainer memory before EMA synthesis
    del trainer
    torch.cuda.empty_cache()

    # Synthesize EMA on rank 0 only; other ranks wait at the barrier
    if local_rank == 0:
        log.info(f'Synthesizing EMA with sigma={cfg.ema.default_output_sigma}')
        ema_sigma = cfg.ema.default_output_sigma
        state_dict = synthesize_ema(cfg, ema_sigma, step=None)
        save_dir = Path(run_dir) / f'{cfg.exp_id}_ema_final.pth'
        torch.save(state_dict, save_dir)
        log.info(f'Synthesized EMA saved to {save_dir}!')
    distributed.barrier()

    # todo Jan 12
    #log.info(f'Evaluation: {eval_cfg}')
    #sample(eval_cfg)

    # clean-up
    log.complete()
    distributed.barrier()
    distributed.destroy_process_group()
train_dpo-Copy1.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import logging
3
+ import math
4
+ import random
5
+ from datetime import timedelta
6
+ from pathlib import Path
7
+
8
+ import hydra
9
+ import numpy as np
10
+ import torch
11
+ import torch.distributed as distributed
12
+ from hydra import compose
13
+ from hydra.core.hydra_config import HydraConfig
14
+ from omegaconf import DictConfig, open_dict
15
+ from torch.distributed.elastic.multiprocessing.errors import record
16
+
17
+ from mmaudio.data.data_setup import setup_training_datasets, setup_val_datasets
18
+ from mmaudio.model.sequence_config import CONFIG_16K, CONFIG_44K
19
+ from mmaudio.runner import Runner
20
+ from mmaudio.sample import sample
21
+ from mmaudio.utils.dist_utils import info_if_rank_zero, local_rank, world_size
22
+ from mmaudio.utils.logger import TensorboardLogger
23
+ from mmaudio.utils.synthesize_ema import synthesize_ema, synthesize_ema_dpo
24
+
25
+ torch.backends.cuda.matmul.allow_tf32 = True
26
+ torch.backends.cudnn.allow_tf32 = True
27
+
28
+ log = logging.getLogger()
29
+
30
+
31
def distributed_setup():
    """Initialize NCCL distributed training; returns (local_rank, world_size)."""
    # Two-hour timeout to tolerate slow per-rank startup work.
    distributed.init_process_group(backend="nccl",
                                   timeout=timedelta(hours=2))
    log.info(f'Initialized: local_rank={local_rank}, world_size={world_size}')
    return local_rank, world_size
36
+
37
@record
@hydra.main(version_base='1.3.2', config_path='config', config_name='train_dpo_config.yaml') # todo Mar 2
def train(cfg: DictConfig):
    """Distributed DPO fine-tuning entry point, driven by the Hydra DPO config.

    Mirrors train.py but uses paired (chosen, reject) latents: latent stats are
    stacked per-branch, the Runner is constructed with ``dpo_train=True``, the
    loop calls ``train_dpo_pass``, and EMA synthesis uses the DPO variant.
    """
    # initial setup
    torch.cuda.set_device(local_rank)
    torch.backends.cudnn.benchmark = cfg.cudnn_benchmark
    distributed_setup()
    num_gpus = world_size
    run_dir = HydraConfig.get().run.dir

    # compose early such that it does not rely on future hard disk reading
    eval_cfg = compose('eval_config', overrides=[f'exp_id={cfg.exp_id}'])

    # patch data dim: sequence lengths depend on the audio sampling rate variant
    if cfg.model.endswith('16k'):
        seq_cfg = CONFIG_16K
    elif cfg.model.endswith('44k'):
        seq_cfg = CONFIG_44K
    else:
        raise ValueError(f'Unknown model: {cfg.model}')
    with open_dict(cfg):
        cfg.data_dim.latent_seq_len = seq_cfg.latent_seq_len
        cfg.data_dim.clip_seq_len = seq_cfg.clip_seq_len
        cfg.data_dim.sync_seq_len = seq_cfg.sync_seq_len

    # wrap python logger with a tensorboard logger
    # NOTE: this local `log` shadows the module-level logger for the rest of train()
    log = TensorboardLogger(cfg.exp_id,
                            run_dir,
                            logging.getLogger(),
                            is_rank0=(local_rank == 0),
                            enable_email=cfg.enable_email and not cfg.debug)

    info_if_rank_zero(log, f'All configuration: {cfg}')
    info_if_rank_zero(log, f'Number of GPUs detected: {num_gpus}')

    # number of dataloader workers
    info_if_rank_zero(log, f'Number of dataloader workers (per GPU): {cfg.num_workers}')

    # Set seeds to ensure the same initialization on every rank
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)

    # setting up configurations: cfg.batch_size is the GLOBAL batch size
    info_if_rank_zero(log, f'Training configuration: {cfg}')
    cfg.batch_size //= num_gpus
    info_if_rank_zero(log, f'Batch size (per GPU): {cfg.batch_size}')

    # determine time to change max skip
    total_iterations = cfg['num_iterations']

    # setup datasets
    dataset, sampler, loader = setup_training_datasets(cfg)
    info_if_rank_zero(log, f'Number of training samples: {len(dataset)}')
    info_if_rank_zero(log, f'Number of training batches: {len(loader)}')

    val_dataset, val_loader, eval_loader = setup_val_datasets(cfg)
    info_if_rank_zero(log, f'Number of val samples: {len(val_dataset)}')
    val_cfg = cfg.dpo_data.ExtractedVGG_val

    # compute and set mean and std for both DPO branches, stacked along dim 0
    latent_mean_chosen, latent_std_chosen, latent_mean_reject, latent_std_reject = dataset.compute_latent_stats() # todo Mar 2
    latent_mean_dpo = torch.stack([latent_mean_chosen, latent_mean_reject], dim=0) # todo Mar 2
    latent_std_dpo = torch.stack([latent_std_chosen, latent_std_reject], dim=0) # todo Mar 2

    # construct the trainer in DPO mode
    trainer = Runner(cfg,
                     log=log,
                     run_path=run_dir,
                     for_training=True,
                     latent_mean=latent_mean_dpo,
                     latent_std=latent_std_dpo,
                     dpo_train=True).enter_train() # todo Mar 2
    # snapshot of the RNG state, restored before every validation pass so that
    # validation uses identical randomness at every interval
    eval_rng_clone = trainer.rng.graphsafe_get_state()


    # load previous checkpoint if needed (including model weights, ema, and optimizer, scheduler)
    if cfg['checkpoint'] is not None:
        curr_iter = trainer.load_checkpoint(cfg['checkpoint'])
        cfg['checkpoint'] = None
        info_if_rank_zero(log, 'Model checkpoint loaded!')
    else:
        # if run_dir exists, load the latest checkpoint (auto-resume)
        checkpoint = trainer.get_latest_checkpoint_path()
        if checkpoint is not None:
            curr_iter = trainer.load_checkpoint(checkpoint)
            info_if_rank_zero(log, 'Latest checkpoint loaded!')
        else:
            # load previous network weights if needed (weights only; the usual
            # starting point for DPO from a pre-trained model)
            curr_iter = 0
            if cfg['weights'] is not None:
                info_if_rank_zero(log, 'Loading weights from the disk')
                trainer.load_weights(cfg['weights'])
                cfg['weights'] = None


    # determine max epoch
    total_epoch = math.ceil(total_iterations / len(loader))
    current_epoch = curr_iter // len(loader)
    info_if_rank_zero(log, f'We will approximately use {total_epoch} epochs.')

    # training loop
    try:
        # Need this to select random bases in different workers
        np.random.seed(np.random.randint(2 ** 30 - 1) + local_rank * 1000)
        while curr_iter < total_iterations:
            # Crucial for randomness! DistributedSampler shuffles per-epoch.
            sampler.set_epoch(current_epoch)
            current_epoch += 1
            log.debug(f'Current epoch: {current_epoch}')

            trainer.enter_train()
            trainer.log.data_timer.start()
            for data in loader:
                trainer.train_dpo_pass(data, curr_iter)

                if (curr_iter + 1) % cfg.val_interval == 0:
                    # swap into a eval rng state, i.e., use the same seed for every validation pass
                    train_rng_snapshot = trainer.rng.graphsafe_get_state()
                    trainer.rng.graphsafe_set_state(eval_rng_clone)
                    info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
                    for data in val_loader:
                        trainer.validation_pass(data, curr_iter)
                    distributed.barrier()
                    trainer.val_integrator.finalize('val', curr_iter, ignore_timer=True)
                    trainer.rng.graphsafe_set_state(train_rng_snapshot)
                # todo Jan 12
                # if (curr_iter + 1) % cfg.eval_interval == 0:
                #     save_eval = (curr_iter + 1) % cfg.save_eval_interval == 0
                #     train_rng_snapshot = trainer.rng.graphsafe_get_state()
                #     trainer.rng.graphsafe_set_state(eval_rng_clone)
                #     info_if_rank_zero(log, f'Iteration {curr_iter}: validating')
                #     for data in eval_loader:
                #         audio_path = trainer.inference_pass(data,
                #                                             curr_iter,
                #                                             val_cfg,
                #                                             save_eval=save_eval)
                #     distributed.barrier()
                #     trainer.rng.graphsafe_set_state(train_rng_snapshot)
                #     trainer.eval(audio_path, curr_iter, 1, val_cfg)

                curr_iter += 1

                if curr_iter >= total_iterations:
                    break
    except Exception as e:
        log.error(f'Error occurred at iteration {curr_iter}!')
        log.critical(e.message if hasattr(e, 'message') else str(e))
        raise
    finally:
        # always save state on exit so the run can resume
        # NOTE(review): if an exception fires before curr_iter is assigned,
        # this would raise NameError/UnboundLocalError — confirm reachable paths
        if not cfg.debug:
            trainer.save_checkpoint(curr_iter)
            trainer.save_weights(curr_iter)

    # Inference pass: free trainer memory before EMA synthesis
    del trainer
    torch.cuda.empty_cache()

    # Synthesize EMA on rank 0 only; other ranks wait at the barrier
    if local_rank == 0:
        log.info(f'Synthesizing EMA with sigma={cfg.ema.default_output_sigma}')
        ema_sigma = cfg.ema.default_output_sigma
        state_dict = synthesize_ema_dpo(cfg, ema_sigma, dpo_train=True, step=None)
        save_dir = Path(run_dir) / f'{cfg.exp_id}_ema_final.pth'
        torch.save(state_dict, save_dir)
        log.info(f'Synthesized EMA saved to {save_dir}!')
    distributed.barrier()

    # todo Jan 12
    # log.info(f'Evaluation: {eval_cfg}')
    # sample(eval_cfg)

    # clean-up
    log.complete()
    distributed.barrier()
    distributed.destroy_process_group()