Upload data via Kaggle
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +4 -0
- VibeVoice-finetuning/LICENSE +21 -0
- VibeVoice-finetuning/README.md +762 -0
- VibeVoice-finetuning/checkpoint-1800/lora/acoustic_connector/pytorch_model.bin +3 -0
- VibeVoice-finetuning/checkpoint-1800/lora/config.json +26 -0
- VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/config.json +20 -0
- VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/diffusion_head_full.bin +3 -0
- VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/model.safetensors +3 -0
- VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head_full.bin +3 -0
- VibeVoice-finetuning/checkpoint-1800/lora/model.safetensors +3 -0
- VibeVoice-finetuning/checkpoint-1800/lora/semantic_connector/pytorch_model.bin +3 -0
- VibeVoice-finetuning/checkpoint-1800/optimizer.pt +3 -0
- VibeVoice-finetuning/checkpoint-1800/rng_state.pth +3 -0
- VibeVoice-finetuning/checkpoint-1800/scaler.pt +3 -0
- VibeVoice-finetuning/checkpoint-1800/scheduler.pt +3 -0
- VibeVoice-finetuning/checkpoint-1800/trainer_state.json +0 -0
- VibeVoice-finetuning/diff_head_layers.txt +26 -0
- VibeVoice-finetuning/lora/acoustic_connector/pytorch_model.bin +3 -0
- VibeVoice-finetuning/lora/config.json +26 -0
- VibeVoice-finetuning/lora/diffusion_head/config.json +20 -0
- VibeVoice-finetuning/lora/diffusion_head/diffusion_head_full.bin +3 -0
- VibeVoice-finetuning/lora/diffusion_head/model.safetensors +3 -0
- VibeVoice-finetuning/lora/diffusion_head_full.bin +3 -0
- VibeVoice-finetuning/lora/model.safetensors +3 -0
- VibeVoice-finetuning/lora/semantic_connector/pytorch_model.bin +3 -0
- VibeVoice-finetuning/preprocessed/.gitattributes +60 -0
- VibeVoice-finetuning/preprocessed/preprocessed_batches.pt +3 -0
- VibeVoice-finetuning/pyproject.toml +35 -0
- VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora0.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/data_vibevoice.py +453 -0
- VibeVoice-finetuning/src/finetune_vibevoice_lora.py +902 -0
- VibeVoice-finetuning/src/finetune_vibevoice_lora0.py +984 -0
- VibeVoice-finetuning/src/vibevoice/.DS_Store +0 -0
- VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_1.5b_64k.json +112 -0
- VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_7b_32k.json +113 -0
- VibeVoice-finetuning/src/vibevoice/modular/__init__.py +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc +0 -0
- VibeVoice-finetuning/src/vibevoice/modular/configuration_vibevoice.py +248 -0
.gitattributes
CHANGED
|
@@ -58,3 +58,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 58 |
# Video files - compressed
|
| 59 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
# Video files - compressed
|
| 59 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
VibeVoice-finetuning/wandb/run-20260218_142500-puguclmi/run-puguclmi.wandb filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
VibeVoice-finetuning/wandb/run-20260218_143617-09tsct60/run-09tsct60.wandb filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
VibeVoice-finetuning/wandb/run-20260218_144236-a0h99ykt/run-a0h99ykt.wandb filter=lfs diff=lfs merge=lfs -text
|
| 64 |
+
VibeVoice-finetuning/wandb/run-20260219_165256-e29g8fsh/run-e29g8fsh.wandb filter=lfs diff=lfs merge=lfs -text
|
VibeVoice-finetuning/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Resemble AI
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
VibeVoice-finetuning/README.md
ADDED
|
@@ -0,0 +1,762 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# Unofficial WIP Finetuning repo for VibeVoice
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Hardware requirements
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
To train a VibeVoice 1.5B LoRA, a machine with at least 16 GB of VRAM is recommended.
|
| 13 |
+
|
| 14 |
+
To train a VibeVoice 7B LoRA, a machine with at least 48 GB of VRAM is recommended.
|
| 15 |
+
|
| 16 |
+
Keep in mind that longer audio clips increase VRAM requirements.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Installation
|
| 21 |
+
|
| 22 |
+
It is recommended to install this in a fresh environment. Specifically, the Dockerized environment `runpod/pytorch:2.8.0-py3.11-cuda12.8.1-cudnn-devel-ubuntu22.04` has been tested to work.
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
Transformers version 4.51.3 is known to work, while other versions have errors related to Qwen2 architecture.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
```
|
| 30 |
+
git clone https://github.com/voicepowered-ai/VibeVoice-finetuning
|
| 31 |
+
|
| 32 |
+
pip install -e .
|
| 33 |
+
|
| 34 |
+
pip uninstall -y transformers && pip install transformers==4.51.3
|
| 35 |
+
|
| 36 |
+
(OPTIONAL) wandb login
|
| 37 |
+
|
| 38 |
+
(OPTIONAL) export HF_HOME=/workspace/hf_models
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Usage
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
## VibeVoice 1.5B / 7B (LoRA) fine-tuning
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
We put some code together for training VibeVoice (7B) with LoRA. This uses the vendored VibeVoice model/processor and trains with a dual loss: masked CE on text tokens plus diffusion MSE on acoustic latents.
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
Requirements:
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
- Download a compatible VibeVoice 7B or 1.5B checkpoint (config + weights) and its processor files (preprocessor_config.json), or run directly from the Hugging Face model.
|
| 64 |
+
|
| 65 |
+
- A 24 kHz audio dataset with audio files (target audio), text prompts (transcriptions), and optionally voice prompts (reference audio)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
### Training with Hugging Face Dataset
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
```
|
| 76 |
+
python -m src.finetune_vibevoice_lora \
|
| 77 |
+
|
| 78 |
+
--model_name_or_path aoi-ot/VibeVoice-Large \
|
| 79 |
+
|
| 80 |
+
--processor_name_or_path src/vibevoice/processor \
|
| 81 |
+
|
| 82 |
+
--dataset_name your/dataset \
|
| 83 |
+
|
| 84 |
+
--text_column_name text \
|
| 85 |
+
|
| 86 |
+
--audio_column_name audio \
|
| 87 |
+
|
| 88 |
+
--voice_prompts_column_name voice_prompts \
|
| 89 |
+
|
| 90 |
+
--output_dir outputTrain3 \
|
| 91 |
+
|
| 92 |
+
--per_device_train_batch_size 8 \
|
| 93 |
+
|
| 94 |
+
--gradient_accumulation_steps 16 \
|
| 95 |
+
|
| 96 |
+
--learning_rate 2.5e-5 \
|
| 97 |
+
|
| 98 |
+
--num_train_epochs 5 \
|
| 99 |
+
|
| 100 |
+
--logging_steps 10 \
|
| 101 |
+
|
| 102 |
+
--save_steps 100 \
|
| 103 |
+
|
| 104 |
+
--eval_steps 100 \
|
| 105 |
+
|
| 106 |
+
--report_to wandb \
|
| 107 |
+
|
| 108 |
+
--remove_unused_columns False \
|
| 109 |
+
|
| 110 |
+
--bf16 True \
|
| 111 |
+
|
| 112 |
+
--do_train \
|
| 113 |
+
|
| 114 |
+
--gradient_clipping \
|
| 115 |
+
|
| 116 |
+
--gradient_checkpointing False \
|
| 117 |
+
|
| 118 |
+
--ddpm_batch_mul 4 \
|
| 119 |
+
|
| 120 |
+
--diffusion_loss_weight 1.4 \
|
| 121 |
+
|
| 122 |
+
--train_diffusion_head True \
|
| 123 |
+
|
| 124 |
+
--ce_loss_weight 0.04 \
|
| 125 |
+
|
| 126 |
+
--voice_prompt_drop_rate 0.2 \
|
| 127 |
+
|
| 128 |
+
--lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj \
|
| 129 |
+
|
| 130 |
+
--lr_scheduler_type cosine \
|
| 131 |
+
|
| 132 |
+
--warmup_ratio 0.03 \
|
| 133 |
+
|
| 134 |
+
--max_grad_norm 0.8
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
----------
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
### Training with Local JSONL Dataset
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
```
|
| 146 |
+
python -m src.finetune_vibevoice_lora \
|
| 147 |
+
|
| 148 |
+
--model_name_or_path aoi-ot/VibeVoice-Large \
|
| 149 |
+
|
| 150 |
+
--processor_name_or_path src/vibevoice/processor \
|
| 151 |
+
|
| 152 |
+
--train_jsonl prompts.jsonl \
|
| 153 |
+
|
| 154 |
+
--text_column_name text \
|
| 155 |
+
|
| 156 |
+
--audio_column_name audio \
|
| 157 |
+
|
| 158 |
+
--output_dir outputTrain3 \
|
| 159 |
+
|
| 160 |
+
--per_device_train_batch_size 8 \
|
| 161 |
+
|
| 162 |
+
--gradient_accumulation_steps 16 \
|
| 163 |
+
|
| 164 |
+
--learning_rate 2.5e-5 \
|
| 165 |
+
|
| 166 |
+
--num_train_epochs 5 \
|
| 167 |
+
|
| 168 |
+
--logging_steps 10 \
|
| 169 |
+
|
| 170 |
+
--save_steps 100 \
|
| 171 |
+
|
| 172 |
+
--report_to wandb \
|
| 173 |
+
|
| 174 |
+
--remove_unused_columns False \
|
| 175 |
+
|
| 176 |
+
--bf16 True \
|
| 177 |
+
|
| 178 |
+
--do_train \
|
| 179 |
+
|
| 180 |
+
--gradient_clipping \
|
| 181 |
+
|
| 182 |
+
--gradient_checkpointing False \
|
| 183 |
+
|
| 184 |
+
--ddpm_batch_mul 4 \
|
| 185 |
+
|
| 186 |
+
--diffusion_loss_weight 1.4 \
|
| 187 |
+
|
| 188 |
+
--train_diffusion_head True \
|
| 189 |
+
|
| 190 |
+
--ce_loss_weight 0.04 \
|
| 191 |
+
|
| 192 |
+
--voice_prompt_drop_rate 0.2 \
|
| 193 |
+
|
| 194 |
+
--lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj \
|
| 195 |
+
|
| 196 |
+
--lr_scheduler_type cosine \
|
| 197 |
+
|
| 198 |
+
--warmup_ratio 0.03 \
|
| 199 |
+
|
| 200 |
+
--max_grad_norm 0.8
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
### JSONL format:
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
You can provide an optional `voice_prompts` key. If it is omitted, a voice prompt will be automatically generated from the target audio.
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
**Example without a pre-defined voice prompt (will be auto-generated):**
|
| 213 |
+
|
| 214 |
+
`{"text": "Speaker 0: Speaker0 transcription.", "audio": "/workspace/wavs/segment_000000.wav"}`
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
**Example with a pre-defined voice prompt:**
|
| 219 |
+
|
| 220 |
+
`{"text": "Speaker 0: Speaker0 transcription.", "audio": "/workspace/wavs/segment_000000.wav", "voice_prompts": "/path/to/a/different/prompt.wav"}`
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
**Example with multiple speakers and voice prompts:**
|
| 225 |
+
|
| 226 |
+
`{"text": "Speaker 0: How is the project coming along?\nSpeaker 1: It's going well, we should be finished by Friday.", "audio": "/data/conversations/convo_01.wav", "voice_prompts": ["/data/prompts/alice_voice_prompt.wav", "/data/prompts/bob_voice_prompt.wav"]}`
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
# Notes:
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
- Audio is assumed to be 24 kHz; input audio will be loaded/resampled to 24 kHz.
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
- If you pass raw NumPy arrays or torch Tensors as audio (without sampling rate metadata), the collator assumes they are already 24 kHz. To trigger resampling, provide dicts like {"array": <np.ndarray>, "sampling_rate": <int>} or file paths.
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
- Tokenizers (acoustic/semantic) are frozen by default. LoRA is applied to the LLM (Qwen) and optionally to the diffusion head.
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
- The collator builds interleaved sequences with speech placeholders and computes the required masks for diffusion loss.
|
| 249 |
+
|
| 250 |
+
- If a voice_prompts column is not provided in your dataset for a given sample, a voice prompt is **automatically generated** by taking a random clip from the target audio. This fallback ensures the model's voice cloning ability is maintained. You can override this behavior by providing your own voice prompts.
|
| 251 |
+
|
| 252 |
+
- These voice prompts are randomly dropped during training to improve generalization. Drop rates of 0.2 and 0.25 have been tested with satisfactory results.
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
- The model learns to emit a closing `[speech_end]` token after target placeholders.
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
- For multi‑speaker prompts, ensure `voice_prompts` list order matches `Speaker 0/1/...` tags in your text.
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
- LoRA adapters are saved under `output_dir/lora` after training.
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
# Acknowledgements
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
- [VibeVoice](https://github.com/microsoft/VibeVoice)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
- [chatterbox-finetuning](https://github.com/stlohrey/chatterbox-finetuning)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
## Training Script Arguments
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
Comprehensive list of all the command-line arguments available for the fine-tuning script.
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
### Model & Architecture Arguments
|
| 292 |
+
|
| 293 |
+
Controls the base model, its configuration, and which components are trained.
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
* `--model_name_or_path`
|
| 298 |
+
|
| 299 |
+
* **What it does:** Specifies the path to the pretrained VibeVoice base model. This can be a local directory or a Hugging Face Hub repository ID.
|
| 300 |
+
|
| 301 |
+
* **Required:** Yes.
|
| 302 |
+
|
| 303 |
+
* **Example:**
|
| 304 |
+
|
| 305 |
+
```bash
|
| 306 |
+
|
| 307 |
+
--model_name_or_path aoi-ot/VibeVoice-Large
|
| 308 |
+
|
| 309 |
+
```
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
* `--processor_name_or_path`
|
| 314 |
+
|
| 315 |
+
* **What it does:** Specifies the path to the VibeVoice processor configuration. If not provided, it defaults to the `model_name_or_path`.
|
| 316 |
+
|
| 317 |
+
* **Example:**
|
| 318 |
+
|
| 319 |
+
```bash
|
| 320 |
+
|
| 321 |
+
--processor_name_or_path src/vibevoice/processor
|
| 322 |
+
|
| 323 |
+
```
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
* `--train_diffusion_head`
|
| 328 |
+
|
| 329 |
+
* **What it does:** A boolean flag to enable **full fine-tuning** of the diffusion prediction head. When enabled, all parameters of the diffusion head become trainable.
|
| 330 |
+
|
| 331 |
+
* **Example:**
|
| 332 |
+
|
| 333 |
+
```bash
|
| 334 |
+
|
| 335 |
+
--train_diffusion_head True
|
| 336 |
+
|
| 337 |
+
```
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
* `--train_connectors`
|
| 342 |
+
|
| 343 |
+
* **What it does:** A boolean flag to enable training of the acoustic and semantic connectors, which bridge different parts of the model.
|
| 344 |
+
|
| 345 |
+
* **Example:**
|
| 346 |
+
|
| 347 |
+
```bash
|
| 348 |
+
|
| 349 |
+
--train_connectors True
|
| 350 |
+
|
| 351 |
+
```
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
* `--lora_target_modules`
|
| 356 |
+
|
| 357 |
+
* **What it does:** A comma-separated string of module names within the language model to apply LoRA adapters to. This is the primary way to enable LoRA for the text-processing part of the model.
|
| 358 |
+
|
| 359 |
+
* **Example:**
|
| 360 |
+
|
| 361 |
+
```bash
|
| 362 |
+
|
| 363 |
+
--lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
|
| 364 |
+
|
| 365 |
+
```
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
* `--lora_r`
|
| 370 |
+
|
| 371 |
+
* **What it does:** The rank (`r`) of the LoRA decomposition. A smaller number means fewer trainable parameters.
|
| 372 |
+
|
| 373 |
+
* **Default:** `8`
|
| 374 |
+
|
| 375 |
+
* **Example:**
|
| 376 |
+
|
| 377 |
+
```bash
|
| 378 |
+
|
| 379 |
+
--lora_r 16
|
| 380 |
+
|
| 381 |
+
```
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
* `--lora_alpha`
|
| 386 |
+
|
| 387 |
+
* **What it does:** The scaling factor for the LoRA weights. A common practice is to set `lora_alpha` to be four times the value of `lora_r`.
|
| 388 |
+
|
| 389 |
+
* **Default:** `32`
|
| 390 |
+
|
| 391 |
+
* **Example:**
|
| 392 |
+
|
| 393 |
+
```bash
|
| 394 |
+
|
| 395 |
+
--lora_alpha 64
|
| 396 |
+
|
| 397 |
+
```
|
| 398 |
+
|
| 399 |
+
* `--lora_wrap_diffusion_head`
|
| 400 |
+
|
| 401 |
+
* **What it does:** An **alternative** to `--train_diffusion_head`. If `True`, it applies LoRA adapters to the diffusion head instead of fine-tuning it fully, enabling more parameter-efficient training of the head. Use only one of `--train_diffusion_head` or `--lora_wrap_diffusion_head`, not both.
|
| 402 |
+
|
| 403 |
+
* **Default:** `False`
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
* `--layers_to_freeze`
|
| 410 |
+
|
| 411 |
+
* **What it does:** Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8'). [Diffusion head layer indices](https://github.com/voicepowered-ai/VibeVoice-finetuning/blob/main/diff_head_layers.txt)
|
| 412 |
+
|
| 413 |
+
* **Default:** `None`
|
| 414 |
+
|
| 415 |
+
### Data & Processing Arguments
|
| 416 |
+
|
| 417 |
+
Defines the dataset to be used, its structure, and how it should be processed.
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
* `--train_jsonl`
|
| 422 |
+
|
| 423 |
+
* **What it does:** Path to your local training data file in JSONL (JSON Lines) format. Each line should be a JSON object with keys for text and audio path.
|
| 424 |
+
|
| 425 |
+
* **Example:**
|
| 426 |
+
|
| 427 |
+
```bash
|
| 428 |
+
|
| 429 |
+
--train_jsonl prompts.jsonl
|
| 430 |
+
|
| 431 |
+
```
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
* `--validation_jsonl`
|
| 436 |
+
|
| 437 |
+
* **What it does:** Optional path to a local validation data file in JSONL format.
|
| 438 |
+
|
| 439 |
+
* **Example:**
|
| 440 |
+
|
| 441 |
+
```bash
|
| 442 |
+
|
| 443 |
+
--validation_jsonl validation_prompts.jsonl
|
| 444 |
+
|
| 445 |
+
```
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
* `--text_column_name`
|
| 450 |
+
|
| 451 |
+
* **What it does:** The name of the key in your JSONL file that contains the text transcription/prompt.
|
| 452 |
+
|
| 453 |
+
* **Default:** `text`
|
| 454 |
+
|
| 455 |
+
* **Example:**
|
| 456 |
+
|
| 457 |
+
```bash
|
| 458 |
+
|
| 459 |
+
--text_column_name "prompt"
|
| 460 |
+
|
| 461 |
+
```
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
* `--audio_column_name`
|
| 466 |
+
|
| 467 |
+
* **What it does:** The name of the key in your JSONL file that contains the path to the audio file.
|
| 468 |
+
|
| 469 |
+
* **Default:** `audio`
|
| 470 |
+
|
| 471 |
+
* **Example:**
|
| 472 |
+
|
| 473 |
+
```bash
|
| 474 |
+
|
| 475 |
+
--audio_column_name "file_path"
|
| 476 |
+
|
| 477 |
+
```
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
* `--voice_prompt_drop_rate`
|
| 482 |
+
|
| 483 |
+
* **What it does:** The probability (from 0.0 to 1.0) of randomly dropping the conditioning voice prompt during training. This acts as a regularizer.
|
| 484 |
+
|
| 485 |
+
* **Default:** `0.0`
|
| 486 |
+
|
| 487 |
+
* **Example:**
|
| 488 |
+
|
| 489 |
+
```bash
|
| 490 |
+
|
| 491 |
+
--voice_prompt_drop_rate 0.2
|
| 492 |
+
|
| 493 |
+
```
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
### Core Training Arguments
|
| 498 |
+
|
| 499 |
+
Standard Hugging Face `TrainingArguments` that control the training loop, optimizer, and saving.
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
* `--output_dir`
|
| 504 |
+
|
| 505 |
+
* **What it does:** The directory where model checkpoints and final outputs will be saved.
|
| 506 |
+
|
| 507 |
+
* **Required:** Yes.
|
| 508 |
+
|
| 509 |
+
* **Example:**
|
| 510 |
+
|
| 511 |
+
```bash
|
| 512 |
+
|
| 513 |
+
--output_dir output_model
|
| 514 |
+
|
| 515 |
+
```
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
* `--per_device_train_batch_size`
|
| 520 |
+
|
| 521 |
+
* **What it does:** The number of training examples processed per GPU in a single step.
|
| 522 |
+
|
| 523 |
+
* **Example:**
|
| 524 |
+
|
| 525 |
+
```bash
|
| 526 |
+
|
| 527 |
+
--per_device_train_batch_size 8
|
| 528 |
+
|
| 529 |
+
```
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
* `--gradient_accumulation_steps`
|
| 534 |
+
|
| 535 |
+
* **What it does:** The number of forward passes to accumulate gradients for before performing an optimizer step. This effectively increases the batch size without using more VRAM.
|
| 536 |
+
|
| 537 |
+
* **Example:**
|
| 538 |
+
|
| 539 |
+
```bash
|
| 540 |
+
|
| 541 |
+
--gradient_accumulation_steps 16
|
| 542 |
+
|
| 543 |
+
```
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
* `--learning_rate`
|
| 548 |
+
|
| 549 |
+
* **What it does:** The initial learning rate for the optimizer.
|
| 550 |
+
|
| 551 |
+
* **Example:**
|
| 552 |
+
|
| 553 |
+
```bash
|
| 554 |
+
|
| 555 |
+
--learning_rate 2.5e-5
|
| 556 |
+
|
| 557 |
+
```
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
* `--num_train_epochs`
|
| 562 |
+
|
| 563 |
+
* **What it does:** The total number of times to iterate over the entire training dataset.
|
| 564 |
+
|
| 565 |
+
* **Example:**
|
| 566 |
+
|
| 567 |
+
```bash
|
| 568 |
+
|
| 569 |
+
--num_train_epochs 5
|
| 570 |
+
|
| 571 |
+
```
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
* `--logging_steps`
|
| 576 |
+
|
| 577 |
+
* **What it does:** How often (in steps) to log training metrics like loss.
|
| 578 |
+
|
| 579 |
+
* **Example:**
|
| 580 |
+
|
| 581 |
+
```bash
|
| 582 |
+
|
| 583 |
+
--logging_steps 10
|
| 584 |
+
|
| 585 |
+
```
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
* `--save_steps`
|
| 590 |
+
|
| 591 |
+
* **What it does:** How often (in steps) to save a model checkpoint.
|
| 592 |
+
|
| 593 |
+
* **Example:**
|
| 594 |
+
|
| 595 |
+
```bash
|
| 596 |
+
|
| 597 |
+
--save_steps 100
|
| 598 |
+
|
| 599 |
+
```
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
* `--report_to`
|
| 604 |
+
|
| 605 |
+
* **What it does:** The integration to report logs to. Can be `wandb`, `tensorboard`, or `none`.
|
| 606 |
+
|
| 607 |
+
* **Example:**
|
| 608 |
+
|
| 609 |
+
```bash
|
| 610 |
+
|
| 611 |
+
--report_to wandb
|
| 612 |
+
|
| 613 |
+
```
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
* `--remove_unused_columns`
|
| 618 |
+
|
| 619 |
+
* **What it does:** Whether to remove columns from the dataset not used by the model's `forward` method. **This must be set to `False`** for this script to work correctly.
|
| 620 |
+
|
| 621 |
+
* **Example:**
|
| 622 |
+
|
| 623 |
+
```bash
|
| 624 |
+
|
| 625 |
+
--remove_unused_columns False
|
| 626 |
+
|
| 627 |
+
```
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
* `--bf16`
|
| 632 |
+
|
| 633 |
+
* **What it does:** Enables mixed-precision training using `bfloat16`. This speeds up training and reduces memory usage on compatible GPUs (NVIDIA Ampere series and newer).
|
| 634 |
+
|
| 635 |
+
* **Example:**
|
| 636 |
+
|
| 637 |
+
```bash
|
| 638 |
+
|
| 639 |
+
--bf16 True
|
| 640 |
+
|
| 641 |
+
```
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
* `--gradient_checkpointing`
|
| 646 |
+
|
| 647 |
+
* **What it does:** A memory-saving technique that trades compute for memory. Useful for training large models on limited VRAM.
|
| 648 |
+
|
| 649 |
+
* **Example:**
|
| 650 |
+
|
| 651 |
+
```bash
|
| 652 |
+
|
| 653 |
+
--gradient_checkpointing True
|
| 654 |
+
|
| 655 |
+
```
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
* `--lr_scheduler_type`
|
| 660 |
+
|
| 661 |
+
* **What it does:** The type of learning rate schedule to use (e.g., `linear`, `cosine`, `constant`).
|
| 662 |
+
|
| 663 |
+
* **Example:**
|
| 664 |
+
|
| 665 |
+
```bash
|
| 666 |
+
|
| 667 |
+
--lr_scheduler_type cosine
|
| 668 |
+
|
| 669 |
+
```
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
* `--warmup_ratio`
|
| 674 |
+
|
| 675 |
+
* **What it does:** The proportion of total training steps used for a linear warmup from 0 to the initial learning rate.
|
| 676 |
+
|
| 677 |
+
* **Example:**
|
| 678 |
+
|
| 679 |
+
```bash
|
| 680 |
+
|
| 681 |
+
--warmup_ratio 0.03
|
| 682 |
+
|
| 683 |
+
```
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
### Custom VibeVoice Training Arguments
|
| 688 |
+
|
| 689 |
+
Special arguments to control VibeVoice-specific training behaviors.
|
| 690 |
+
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
* `--gradient_clipping`
|
| 694 |
+
|
| 695 |
+
* **What it does:** A custom boolean flag that acts as the master switch for gradient clipping. If you include this flag, the value from `--max_grad_norm` will be used to prevent exploding gradients.
|
| 696 |
+
|
| 697 |
+
* **Example:**
|
| 698 |
+
|
| 699 |
+
```bash
|
| 700 |
+
|
| 701 |
+
--gradient_clipping
|
| 702 |
+
|
| 703 |
+
```
|
| 704 |
+
|
| 705 |
+
* `--max_grad_norm`
|
| 706 |
+
|
| 707 |
+
* **What it does:** The maximum value for gradient clipping. Only active if `--gradient_clipping` is also used.
|
| 708 |
+
|
| 709 |
+
* **Default:** `1.0`
|
| 710 |
+
|
| 711 |
+
* **Example:**
|
| 712 |
+
|
| 713 |
+
```bash
|
| 714 |
+
|
| 715 |
+
--max_grad_norm 0.8
|
| 716 |
+
|
| 717 |
+
```
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
|
| 721 |
+
* `--diffusion_loss_weight`
|
| 722 |
+
|
| 723 |
+
* **What it does:** A float that scales the importance of the diffusion loss (for speech generation quality) in the total loss calculation.
|
| 724 |
+
|
| 725 |
+
* **Example:**
|
| 726 |
+
|
| 727 |
+
```bash
|
| 728 |
+
|
| 729 |
+
--diffusion_loss_weight 1.4
|
| 730 |
+
|
| 731 |
+
```
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
* `--ce_loss_weight`
|
| 736 |
+
|
| 737 |
+
* **What it does:** A float that scales the importance of the Cross-Entropy loss (for text prediction accuracy) in the total loss calculation.
|
| 738 |
+
|
| 739 |
+
* **Example:**
|
| 740 |
+
|
| 741 |
+
```bash
|
| 742 |
+
|
| 743 |
+
--ce_loss_weight 0.04
|
| 744 |
+
|
| 745 |
+
```
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
* `--ddpm_batch_mul`
|
| 750 |
+
|
| 751 |
+
* **What it does:** An integer multiplier for the batch size used specifically within the diffusion process.
|
| 752 |
+
|
| 753 |
+
* **Example:**
|
| 754 |
+
|
| 755 |
+
```bash
|
| 756 |
+
|
| 757 |
+
--ddpm_batch_mul 4
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
```
|
| 761 |
+
|
| 762 |
+
|
VibeVoice-finetuning/checkpoint-1800/lora/acoustic_connector/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b88dadd781938d55285c2239be35bbdd41c7c2e0f6b783c2d6a0f1b99505ba4
|
| 3 |
+
size 4927259
|
VibeVoice-finetuning/checkpoint-1800/lora/config.json
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Qwen2Model"
|
| 4 |
+
],
|
| 5 |
+
"attention_dropout": 0.0,
|
| 6 |
+
"hidden_act": "silu",
|
| 7 |
+
"hidden_size": 1536,
|
| 8 |
+
"initializer_range": 0.02,
|
| 9 |
+
"intermediate_size": 8960,
|
| 10 |
+
"max_position_embeddings": 65536,
|
| 11 |
+
"max_window_layers": 28,
|
| 12 |
+
"model_type": "qwen2",
|
| 13 |
+
"num_attention_heads": 12,
|
| 14 |
+
"num_hidden_layers": 28,
|
| 15 |
+
"num_key_value_heads": 2,
|
| 16 |
+
"rms_norm_eps": 1e-06,
|
| 17 |
+
"rope_scaling": null,
|
| 18 |
+
"rope_theta": 1000000.0,
|
| 19 |
+
"sliding_window": null,
|
| 20 |
+
"tie_word_embeddings": true,
|
| 21 |
+
"torch_dtype": "float16",
|
| 22 |
+
"transformers_version": "4.51.3",
|
| 23 |
+
"use_cache": true,
|
| 24 |
+
"use_sliding_window": false,
|
| 25 |
+
"vocab_size": 151936
|
| 26 |
+
}
|
VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/config.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"VibeVoiceDiffusionHead"
|
| 4 |
+
],
|
| 5 |
+
"ddpm_batch_mul": 4,
|
| 6 |
+
"ddpm_beta_schedule": "cosine",
|
| 7 |
+
"ddpm_num_inference_steps": 20,
|
| 8 |
+
"ddpm_num_steps": 1000,
|
| 9 |
+
"diffusion_type": "ddpm",
|
| 10 |
+
"head_ffn_ratio": 3.0,
|
| 11 |
+
"head_layers": 4,
|
| 12 |
+
"hidden_size": 1536,
|
| 13 |
+
"latent_size": 64,
|
| 14 |
+
"model_type": "vibevoice_diffusion_head",
|
| 15 |
+
"prediction_type": "v_prediction",
|
| 16 |
+
"rms_norm_eps": 1e-05,
|
| 17 |
+
"speech_vae_dim": 64,
|
| 18 |
+
"torch_dtype": "float32",
|
| 19 |
+
"transformers_version": "4.51.3"
|
| 20 |
+
}
|
VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/diffusion_head_full.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:60f6d8625f95debfb00261eb4c59426f92c6b3e36f82360a224a468232fb282a
|
| 3 |
+
size 493128917
|
VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4f289616d1a6c7baa04d15b7d50de2ed0a6aaf930f83b1eacf511a04a7b43c56
|
| 3 |
+
size 493120120
|
VibeVoice-finetuning/checkpoint-1800/lora/diffusion_head_full.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:60f6d8625f95debfb00261eb4c59426f92c6b3e36f82360a224a468232fb282a
|
| 3 |
+
size 493128917
|
VibeVoice-finetuning/checkpoint-1800/lora/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a5f2e022f7b68de757f4ba14867694d11fffe9ef91d7ae99bfacd28b0488c64b
|
| 3 |
+
size 3087464784
|
VibeVoice-finetuning/checkpoint-1800/lora/semantic_connector/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:abf1e11c63eab8288c5fb88c1dff0f4a771ae717fcc318a261bde35af562a653
|
| 3 |
+
size 5123867
|
VibeVoice-finetuning/checkpoint-1800/optimizer.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f85fd7ea39530f0548a70dc66232f7b625d78ccb00cebbcda1abe5abe657e862
|
| 3 |
+
size 986257761
|
VibeVoice-finetuning/checkpoint-1800/rng_state.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8c5befc5e8d63c950202519627dbff5b657b69e7d656669368658e526d4dd9ac
|
| 3 |
+
size 14645
|
VibeVoice-finetuning/checkpoint-1800/scaler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fa0408efb69cab96d5bab9a1aaf44cedbc9fc8d34f4cef378d81605e5c026d5c
|
| 3 |
+
size 1383
|
VibeVoice-finetuning/checkpoint-1800/scheduler.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3badd5864ece5596111b84b5ab52b87875d3a6076273d4be1f10c67323e55bc4
|
| 3 |
+
size 1465
|
VibeVoice-finetuning/checkpoint-1800/trainer_state.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
VibeVoice-finetuning/diff_head_layers.txt
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[0] noisy_images_proj.weight (shape: (3584, 64), trainable: True)
|
| 2 |
+
[1] cond_proj.weight (shape: (3584, 3584), trainable: True)
|
| 3 |
+
[2] t_embedder.mlp.0.weight (shape: (3584, 256), trainable: True)
|
| 4 |
+
[3] t_embedder.mlp.2.weight (shape: (3584, 3584), trainable: True)
|
| 5 |
+
[4] layers.0.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
|
| 6 |
+
[5] layers.0.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
|
| 7 |
+
[6] layers.0.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
|
| 8 |
+
[7] layers.0.norm.weight (shape: (3584,), trainable: True)
|
| 9 |
+
[8] layers.0.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
|
| 10 |
+
[9] layers.1.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
|
| 11 |
+
[10] layers.1.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
|
| 12 |
+
[11] layers.1.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
|
| 13 |
+
[12] layers.1.norm.weight (shape: (3584,), trainable: True)
|
| 14 |
+
[13] layers.1.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
|
| 15 |
+
[14] layers.2.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
|
| 16 |
+
[15] layers.2.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
|
| 17 |
+
[16] layers.2.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
|
| 18 |
+
[17] layers.2.norm.weight (shape: (3584,), trainable: True)
|
| 19 |
+
[18] layers.2.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
|
| 20 |
+
[19] layers.3.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
|
| 21 |
+
[20] layers.3.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
|
| 22 |
+
[21] layers.3.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
|
| 23 |
+
[22] layers.3.norm.weight (shape: (3584,), trainable: True)
|
| 24 |
+
[23] layers.3.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
|
| 25 |
+
[24] final_layer.linear.weight (shape: (64, 3584), trainable: True)
|
| 26 |
+
[25] final_layer.adaLN_modulation.1.weight (shape: (7168, 3584), trainable: True)
|
VibeVoice-finetuning/lora/acoustic_connector/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b88dadd781938d55285c2239be35bbdd41c7c2e0f6b783c2d6a0f1b99505ba4
|
| 3 |
+
size 4927259
|
VibeVoice-finetuning/lora/config.json
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Qwen2Model"
|
| 4 |
+
],
|
| 5 |
+
"attention_dropout": 0.0,
|
| 6 |
+
"hidden_act": "silu",
|
| 7 |
+
"hidden_size": 1536,
|
| 8 |
+
"initializer_range": 0.02,
|
| 9 |
+
"intermediate_size": 8960,
|
| 10 |
+
"max_position_embeddings": 65536,
|
| 11 |
+
"max_window_layers": 28,
|
| 12 |
+
"model_type": "qwen2",
|
| 13 |
+
"num_attention_heads": 12,
|
| 14 |
+
"num_hidden_layers": 28,
|
| 15 |
+
"num_key_value_heads": 2,
|
| 16 |
+
"rms_norm_eps": 1e-06,
|
| 17 |
+
"rope_scaling": null,
|
| 18 |
+
"rope_theta": 1000000.0,
|
| 19 |
+
"sliding_window": null,
|
| 20 |
+
"tie_word_embeddings": true,
|
| 21 |
+
"torch_dtype": "float16",
|
| 22 |
+
"transformers_version": "4.51.3",
|
| 23 |
+
"use_cache": true,
|
| 24 |
+
"use_sliding_window": false,
|
| 25 |
+
"vocab_size": 151936
|
| 26 |
+
}
|
VibeVoice-finetuning/lora/diffusion_head/config.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"VibeVoiceDiffusionHead"
|
| 4 |
+
],
|
| 5 |
+
"ddpm_batch_mul": 4,
|
| 6 |
+
"ddpm_beta_schedule": "cosine",
|
| 7 |
+
"ddpm_num_inference_steps": 20,
|
| 8 |
+
"ddpm_num_steps": 1000,
|
| 9 |
+
"diffusion_type": "ddpm",
|
| 10 |
+
"head_ffn_ratio": 3.0,
|
| 11 |
+
"head_layers": 4,
|
| 12 |
+
"hidden_size": 1536,
|
| 13 |
+
"latent_size": 64,
|
| 14 |
+
"model_type": "vibevoice_diffusion_head",
|
| 15 |
+
"prediction_type": "v_prediction",
|
| 16 |
+
"rms_norm_eps": 1e-05,
|
| 17 |
+
"speech_vae_dim": 64,
|
| 18 |
+
"torch_dtype": "float32",
|
| 19 |
+
"transformers_version": "4.51.3"
|
| 20 |
+
}
|
VibeVoice-finetuning/lora/diffusion_head/diffusion_head_full.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4c54b8fe3ea53ed300c6eeacdf3b2c1d37bba1efea866139d179d9db4c3a4686
|
| 3 |
+
size 493128917
|
VibeVoice-finetuning/lora/diffusion_head/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:872e232eb03d378eed52d7c436c1f0f8672672f123d60a7f301dfd66363b7519
|
| 3 |
+
size 493120120
|
VibeVoice-finetuning/lora/diffusion_head_full.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4c54b8fe3ea53ed300c6eeacdf3b2c1d37bba1efea866139d179d9db4c3a4686
|
| 3 |
+
size 493128917
|
VibeVoice-finetuning/lora/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a5f2e022f7b68de757f4ba14867694d11fffe9ef91d7ae99bfacd28b0488c64b
|
| 3 |
+
size 3087464784
|
VibeVoice-finetuning/lora/semantic_connector/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:abf1e11c63eab8288c5fb88c1dff0f4a771ae717fcc318a261bde35af562a653
|
| 3 |
+
size 5123867
|
VibeVoice-finetuning/preprocessed/.gitattributes
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.avro filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
# Audio files - uncompressed
|
| 40 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
# Audio files - compressed
|
| 44 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
# Image files - uncompressed
|
| 50 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
# Image files - compressed
|
| 55 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
+
# Video files - compressed
|
| 59 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
VibeVoice-finetuning/preprocessed/preprocessed_batches.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a4f1395fc7ba717c7acc2192b0c41a19c914dd2f4155f9040e9ff3de7aaf7258
|
| 3 |
+
size 7805059199
|
VibeVoice-finetuning/pyproject.toml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "vibevoice-finetuning"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Open Source finetuning code for VibeVoice"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.8"
|
| 7 |
+
license = {file = "LICENSE"}
|
| 8 |
+
authors = [
|
| 9 |
+
{name = "jpgallegoarvpb", email = "juanpablo.gallego@voicepowered.ai"}
|
| 10 |
+
]
|
| 11 |
+
dependencies = [
|
| 12 |
+
"numpy~=1.26.0",
|
| 13 |
+
"resampy==0.4.3",
|
| 14 |
+
"librosa==0.11.0",
|
| 15 |
+
"s3tokenizer",
|
| 16 |
+
"torch",
|
| 17 |
+
"torchaudio",
|
| 18 |
+
"transformers",
|
| 19 |
+
"datasets>=2.18.0",
|
| 20 |
+
"diffusers==0.29.0",
|
| 21 |
+
"resemble-perth==1.0.1",
|
| 22 |
+
"omegaconf==2.3.0",
|
| 23 |
+
"conformer==0.3.2",
|
| 24 |
+
"safetensors==0.5.3",
|
| 25 |
+
"peft>=0.11.0",
|
| 26 |
+
"tensorboard>=2.12",
|
| 27 |
+
"wandb"
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
[build-system]
|
| 31 |
+
requires = ["setuptools>=61.0"]
|
| 32 |
+
build-backend = "setuptools.build_meta"
|
| 33 |
+
|
| 34 |
+
[tool.setuptools.packages.find]
|
| 35 |
+
where = ["src"]
|
VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc
ADDED
|
Binary file (23.2 kB). View file
|
|
|
VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora0.cpython-312.pyc
ADDED
|
Binary file (64.9 kB). View file
|
|
|
VibeVoice-finetuning/src/data_vibevoice.py
ADDED
|
@@ -0,0 +1,453 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import warnings
|
| 8 |
+
import random
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import librosa # type: ignore
|
| 12 |
+
except Exception: # pragma: no cover
|
| 13 |
+
librosa = None # Fallback: user must install librosa when using local audio paths
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
import resampy # type: ignore
|
| 17 |
+
except Exception: # pragma: no cover
|
| 18 |
+
resampy = None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _resample_if_needed(wav: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
|
| 22 |
+
if orig_sr == target_sr:
|
| 23 |
+
return wav.astype(np.float32, copy=False)
|
| 24 |
+
if resampy is not None:
|
| 25 |
+
return resampy.resample(wav.astype(np.float32), orig_sr, target_sr)
|
| 26 |
+
if librosa is not None:
|
| 27 |
+
return librosa.resample(y=wav.astype(np.float32), orig_sr=orig_sr, target_sr=target_sr)
|
| 28 |
+
warnings.warn(
|
| 29 |
+
"No resampler available; treating audio as target_sr without resampling. Install resampy or librosa.",
|
| 30 |
+
RuntimeWarning,
|
| 31 |
+
)
|
| 32 |
+
return wav.astype(np.float32, copy=False)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# Lightweight HF-style dataset wrapper (optional). Trainer can also pass raw HF datasets directly.
|
| 36 |
+
class VibeVoiceDataset:
|
| 37 |
+
def __init__(
|
| 38 |
+
self,
|
| 39 |
+
dataset: Any,
|
| 40 |
+
text_column: str = "text",
|
| 41 |
+
audio_column: str = "audio",
|
| 42 |
+
voice_prompts_column: Optional[str] = "voice_prompts",
|
| 43 |
+
) -> None:
|
| 44 |
+
self.dataset = dataset
|
| 45 |
+
self.text_column = text_column
|
| 46 |
+
self.audio_column = audio_column
|
| 47 |
+
self.voice_prompts_column = voice_prompts_column
|
| 48 |
+
|
| 49 |
+
def __len__(self) -> int:
|
| 50 |
+
return len(self.dataset)
|
| 51 |
+
|
| 52 |
+
def __getitem__(self, idx: int) -> Dict[str, Any]:
|
| 53 |
+
item = self.dataset[idx]
|
| 54 |
+
data: Dict[str, Any] = {}
|
| 55 |
+
data["text"] = item[self.text_column]
|
| 56 |
+
data["audio"] = item[self.audio_column]
|
| 57 |
+
|
| 58 |
+
user_provided_prompt = None
|
| 59 |
+
if self.voice_prompts_column and self.voice_prompts_column in item:
|
| 60 |
+
user_provided_prompt = item[self.voice_prompts_column]
|
| 61 |
+
|
| 62 |
+
if user_provided_prompt:
|
| 63 |
+
# A prompt was provided in the dataset, so we use it.
|
| 64 |
+
if not isinstance(user_provided_prompt, list):
|
| 65 |
+
data["voice_prompts"] = [user_provided_prompt]
|
| 66 |
+
else:
|
| 67 |
+
data["voice_prompts"] = user_provided_prompt
|
| 68 |
+
else:
|
| 69 |
+
# FALLBACK: No prompt provided, so we auto-generate one from the target audio.
|
| 70 |
+
try:
|
| 71 |
+
target_sr = 24000
|
| 72 |
+
wav_array = _load_audio_to_24k(item[self.audio_column], target_sr=target_sr)
|
| 73 |
+
audio_len_seconds = len(wav_array) / target_sr
|
| 74 |
+
|
| 75 |
+
min_len_sec = min(5.0, audio_len_seconds / 4.0)
|
| 76 |
+
max_len_sec = min(15.0, audio_len_seconds / 2.0)
|
| 77 |
+
|
| 78 |
+
if min_len_sec > max_len_sec:
|
| 79 |
+
min_len_sec = max_len_sec
|
| 80 |
+
max_len_sec = min(max_len_sec, audio_len_seconds)
|
| 81 |
+
|
| 82 |
+
if max_len_sec > 0.1:
|
| 83 |
+
prompt_len_sec = random.uniform(min_len_sec, max_len_sec)
|
| 84 |
+
prompt_len_samples = int(prompt_len_sec * target_sr)
|
| 85 |
+
|
| 86 |
+
max_start_sample = len(wav_array) - prompt_len_samples
|
| 87 |
+
start_sample = random.randint(0, max_start_sample)
|
| 88 |
+
|
| 89 |
+
prompt_crop = wav_array[start_sample : start_sample + prompt_len_samples]
|
| 90 |
+
|
| 91 |
+
data["voice_prompts"] = [prompt_crop]
|
| 92 |
+
else:
|
| 93 |
+
data["voice_prompts"] = None
|
| 94 |
+
|
| 95 |
+
except Exception as e:
|
| 96 |
+
warnings.warn(f"Could not create voice prompt for item {idx}: {e}")
|
| 97 |
+
data["voice_prompts"] = None
|
| 98 |
+
return data
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _apply_silence_with_crossfade(
|
| 102 |
+
wav: np.ndarray,
|
| 103 |
+
*,
|
| 104 |
+
sample_rate: int,
|
| 105 |
+
pre_silence_sec: float = 0.25,
|
| 106 |
+
pre_crossfade_sec: float = 0.25,
|
| 107 |
+
post_crossfade_sec: float = 0.25,
|
| 108 |
+
post_silence_sec: float = 0.75,
|
| 109 |
+
) -> np.ndarray:
|
| 110 |
+
"""Pad audio with leading/trailing silence and apply crossfades.
|
| 111 |
+
|
| 112 |
+
Structure: [pre_silence][pre_crossfade][audio_body][post_crossfade][post_silence]
|
| 113 |
+
Crossfades blend the audio with silence linearly to avoid hard edges.
|
| 114 |
+
"""
|
| 115 |
+
|
| 116 |
+
wav = np.asarray(wav, dtype=np.float32).reshape(-1)
|
| 117 |
+
|
| 118 |
+
start_sil_samples = int(round(pre_silence_sec * sample_rate))
|
| 119 |
+
end_sil_samples = int(round(post_silence_sec * sample_rate))
|
| 120 |
+
pre_crossfade_samples = int(round(pre_crossfade_sec * sample_rate))
|
| 121 |
+
post_crossfade_samples = int(round(post_crossfade_sec * sample_rate))
|
| 122 |
+
|
| 123 |
+
total_len = wav.shape[0]
|
| 124 |
+
if total_len == 0:
|
| 125 |
+
pieces: List[np.ndarray] = []
|
| 126 |
+
if start_sil_samples > 0:
|
| 127 |
+
pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
|
| 128 |
+
if end_sil_samples > 0:
|
| 129 |
+
pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
|
| 130 |
+
return np.concatenate(pieces) if pieces else wav
|
| 131 |
+
|
| 132 |
+
start_len = min(pre_crossfade_samples, total_len)
|
| 133 |
+
remaining_after_start = max(total_len - start_len, 0)
|
| 134 |
+
end_len = min(post_crossfade_samples, remaining_after_start)
|
| 135 |
+
middle_end_idx = total_len - end_len
|
| 136 |
+
|
| 137 |
+
start_segment = wav[:start_len]
|
| 138 |
+
middle_segment = wav[start_len:middle_end_idx]
|
| 139 |
+
end_segment = wav[middle_end_idx:]
|
| 140 |
+
|
| 141 |
+
def _linear_fade(num_samples: int, start: float, end: float) -> np.ndarray:
|
| 142 |
+
if num_samples <= 0:
|
| 143 |
+
return np.zeros((0,), dtype=np.float32)
|
| 144 |
+
return np.linspace(start, end, num_samples, endpoint=True, dtype=np.float32)
|
| 145 |
+
|
| 146 |
+
start_crossfade = start_segment * _linear_fade(start_len, 0.0, 1.0)
|
| 147 |
+
end_crossfade = end_segment * _linear_fade(end_segment.shape[0], 1.0, 0.0)
|
| 148 |
+
|
| 149 |
+
pieces: List[np.ndarray] = []
|
| 150 |
+
if start_sil_samples > 0:
|
| 151 |
+
pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
|
| 152 |
+
if start_crossfade.size > 0:
|
| 153 |
+
pieces.append(start_crossfade.astype(np.float32, copy=False))
|
| 154 |
+
if middle_segment.size > 0:
|
| 155 |
+
pieces.append(middle_segment.astype(np.float32, copy=False))
|
| 156 |
+
if end_crossfade.size > 0:
|
| 157 |
+
pieces.append(end_crossfade.astype(np.float32, copy=False))
|
| 158 |
+
if end_sil_samples > 0:
|
| 159 |
+
pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
|
| 160 |
+
|
| 161 |
+
return np.concatenate(pieces)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _load_audio_to_24k(
|
| 165 |
+
audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]],
|
| 166 |
+
*,
|
| 167 |
+
target_sr: int = 24000,
|
| 168 |
+
augment_with_silence: bool = False,
|
| 169 |
+
) -> np.ndarray:
|
| 170 |
+
if isinstance(audio, np.ndarray):
|
| 171 |
+
wav_out = audio.astype(np.float32)
|
| 172 |
+
elif isinstance(audio, torch.Tensor):
|
| 173 |
+
wav_out = audio.detach().cpu().float().numpy()
|
| 174 |
+
elif isinstance(audio, str):
|
| 175 |
+
if librosa is None:
|
| 176 |
+
raise RuntimeError("librosa is required to load audio file paths. Please pip install librosa.")
|
| 177 |
+
wav, sr = librosa.load(audio, sr=None, mono=True)
|
| 178 |
+
wav_out = _resample_if_needed(wav, int(sr), target_sr)
|
| 179 |
+
elif isinstance(audio, dict) and "array" in audio and "sampling_rate" in audio:
|
| 180 |
+
arr = np.asarray(audio["array"], dtype=np.float32)
|
| 181 |
+
sr = int(audio["sampling_rate"])
|
| 182 |
+
wav_out = _resample_if_needed(arr, sr, target_sr)
|
| 183 |
+
else:
|
| 184 |
+
raise ValueError(f"Unsupported audio type: {type(audio)}")
|
| 185 |
+
|
| 186 |
+
wav_out = np.asarray(wav_out, dtype=np.float32)
|
| 187 |
+
|
| 188 |
+
if augment_with_silence:
|
| 189 |
+
wav_out = _apply_silence_with_crossfade(wav_out, sample_rate=target_sr)
|
| 190 |
+
|
| 191 |
+
return wav_out
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@dataclass
class VibeVoiceCollator:
    """Batch collator for VibeVoice fine-tuning.

    For each example it tokenizes the text (optionally with conditioning voice
    prompts), appends one diffusion placeholder token per target acoustic
    latent frame, pads everything to a common batch length, and packs the
    waveforms plus the masks needed by the acoustic and semantic paths.
    """

    processor: Any  # VibeVoiceProcessor: provides tokenizer and optional acoustic/semantic tokenizers
    max_length: Optional[int] = None  # hard cap on token length; excess is trimmed from the left
    speech_compress_ratio: int = 3200  # samples per latent frame, used as fallback frame-count estimate
    semantic_vae_dim: int = 128  # feature dim enforced for semantic tensors
    compute_semantics: bool = False  # if True, semantic features are required (error when unavailable)
    debug_checks: bool = False  # enable cheap sanity asserts on the built batch

    text_field: str = "text"
    audio_field: str = "audio"
    voice_prompts_field: str = "voice_prompts"
    voice_prompt_drop_rate: float = 0.0  # probability of dropping the voice prompt for a sample

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, Any]:
        """Collate a list of raw examples into a model-ready batch dict."""
        batch_size = len(features)  # NOTE: currently unused below

        # Per-sample token-level sequences (built first, padded afterwards).
        sample_input_ids: List[List[int]] = []
        sample_attention_masks: List[List[int]] = []
        sample_acoustic_input_masks: List[List[bool]] = []
        sample_acoustic_loss_masks: List[List[bool]] = []

        # Flat list of every speech segment in the batch (voice prompts + targets).
        all_speech_waveforms: List[np.ndarray] = []
        all_speech_latent_lengths: List[int] = []
        per_segment_is_target: List[bool] = []  # True for target audio, False for prompt segments

        for ex in features:
            text: str = ex.get(self.text_field, "")
            voice_prompts: Optional[List[Union[str, np.ndarray, torch.Tensor]]] = ex.get(self.voice_prompts_field)
            target_audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]] = ex.get(self.audio_field)

            # Clamp drop rate for safety
            _drop_rate = self.voice_prompt_drop_rate
            if _drop_rate < 0.0:
                _drop_rate = 0.0
            elif _drop_rate > 1.0:
                _drop_rate = 1.0

            # Tokenize the text; voice prompts are randomly dropped with _drop_rate.
            proc = self.processor(
                text=[text],
                voice_samples=[voice_prompts] if voice_prompts is not None and random.random() >= _drop_rate else None,
                padding=False,
                truncation=False,
                max_length=self.max_length,
                return_tensors="pt",
            )

            ids = proc["input_ids"][0].tolist()
            attn = proc.get("attention_mask", torch.ones_like(proc["input_ids"]))[0].tolist()
            speech_input_mask = proc.get("speech_input_mask")
            if speech_input_mask is None:
                speech_input_mask = torch.zeros_like(proc["input_ids"], dtype=torch.bool)
            speech_input_mask_list = speech_input_mask[0].tolist()

            # Target waveform at 24 kHz with silence augmentation applied.
            wav_target = _load_audio_to_24k(target_audio, target_sr=24000, augment_with_silence=True)
            # Prefer exact frame count from acoustic tokenizer if available; fallback to compress ratio
            target_latent_len = None
            try:
                acoustic_tok = getattr(self.processor, "acoustic_tokenizer", None)
                if acoustic_tok is not None and hasattr(acoustic_tok, "encode"):
                    enc_out = acoustic_tok.encode(wav_target)
                    # Normalize various possible return formats to get time dimension
                    T = None
                    try:
                        # Direct array-like with shape (T, D) or (T,)
                        if hasattr(enc_out, "shape") and len(getattr(enc_out, "shape", [])) >= 1:
                            T = int(enc_out.shape[0])
                        else:
                            # Nested lists/tuples or ModelOutput-like
                            cand = enc_out
                            # Drill down a couple of levels safely
                            for _ in range(2):
                                if isinstance(cand, (list, tuple)) and len(cand) > 0:
                                    cand = cand[0]
                            if hasattr(cand, "shape") and len(getattr(cand, "shape", [])) >= 1:
                                T = int(cand.shape[0])
                    except Exception:
                        T = None
                    if T is not None and T > 0:
                        target_latent_len = T
            except Exception:
                target_latent_len = None
            if target_latent_len is None:
                # Fallback: estimate latent frame count from raw sample count.
                target_latent_len = max(1, int(math.ceil(len(wav_target) / float(self.speech_compress_ratio))))

            # One diffusion placeholder token per target latent frame.
            speech_diff_id = self.processor.tokenizer.speech_diffusion_id
            target_placeholders = [speech_diff_id] * target_latent_len

            ids_extended = ids + target_placeholders
            attn_extended = attn + [1] * target_latent_len

            # Input mask covers prompt + target frames; loss mask only target frames.
            acoustic_input_mask = speech_input_mask_list + [True] * target_latent_len
            acoustic_loss_mask = ([False] * len(speech_input_mask_list)) + [True] * target_latent_len

            # Close the speech span with the dedicated end token (no acoustic loss on it).
            speech_end_id = self.processor.tokenizer.speech_end_id
            ids_extended.append(speech_end_id)
            attn_extended.append(1)
            acoustic_input_mask.append(False)
            acoustic_loss_mask.append(False)

            # Ensure text decoding sees an explicit end-of-sequence token after speech output.
            eos_token_id = getattr(self.processor.tokenizer, "eos_id", None)
            if eos_token_id is None:
                eos_token_id = getattr(self.processor.tokenizer, "eos_token_id", None)
            if eos_token_id is not None and eos_token_id >= 0:
                ids_extended.append(eos_token_id)
                attn_extended.append(1)
                acoustic_input_mask.append(False)
                acoustic_loss_mask.append(False)

            # Enforce max_length by trimming from the left, but never into acoustic tokens.
            if self.max_length is not None and len(ids_extended) > self.max_length:
                cut = len(ids_extended) - int(self.max_length)
                leading_non_acoustic = 0
                for v in acoustic_input_mask:
                    if v:
                        break
                    leading_non_acoustic += 1
                if cut > leading_non_acoustic:
                    raise ValueError(
                        f"--max_length={self.max_length} would truncate into acoustic tokens. "
                        f"Needed cut={cut}, but only {leading_non_acoustic} leading non-acoustic tokens available. "
                        "Increase max_length or shorten text/voice-prompt preamble."
                    )
                ids_extended = ids_extended[cut:]
                attn_extended = attn_extended[cut:]
                acoustic_input_mask = acoustic_input_mask[cut:]
                acoustic_loss_mask = acoustic_loss_mask[cut:]

            sample_input_ids.append(ids_extended)
            sample_attention_masks.append(attn_extended)
            sample_acoustic_input_masks.append(acoustic_input_mask)
            sample_acoustic_loss_masks.append(acoustic_loss_mask)

            # Collect the voice-prompt waveforms the processor prepared (if any).
            voice_speeches = []
            voice_latent_lengths = []
            if proc.get("speech_tensors") is not None:
                voice_np = proc["speech_tensors"].cpu().numpy()
                voice_masks = proc["speech_masks"].cpu().numpy().astype(bool)
                for seg_idx in range(voice_np.shape[0]):
                    voice_speeches.append(voice_np[seg_idx])
                    voice_latent_lengths.append(int(voice_masks[seg_idx].sum()))

            all_speech_waveforms.extend(voice_speeches)
            all_speech_latent_lengths.extend(voice_latent_lengths)
            per_segment_is_target.extend([False] * len(voice_speeches))

            # The target segment goes last for this example; it is the only
            # segment that carries the diffusion loss.
            all_speech_waveforms.append(wav_target)
            all_speech_latent_lengths.append(target_latent_len)
            per_segment_is_target.append(True)

        # ---- Pad token-level sequences to the batch maximum. ----
        max_seq_len = max(len(x) for x in sample_input_ids)
        padded_input_ids = []
        padded_attention_masks = []
        padded_acoustic_input_masks = []
        padded_acoustic_loss_masks = []
        tok = self.processor.tokenizer
        pad_token_id = getattr(tok, "pad_token_id", None)
        if pad_token_id is None or pad_token_id < 0:
            # Fall back to EOS as padding when no dedicated pad token exists.
            pad_token_id = getattr(tok, "eos_token_id", None)
        if pad_token_id is None or pad_token_id < 0:
            raise ValueError(
                "Tokenizer has no pad_token_id or eos_token_id; please set one or pass a valid pad id."
            )
        for ids, attn, ain_mask, aloss_mask in zip(
            sample_input_ids, sample_attention_masks, sample_acoustic_input_masks, sample_acoustic_loss_masks
        ):
            pad_len = max_seq_len - len(ids)
            padded_input_ids.append(ids + [pad_token_id] * pad_len)
            padded_attention_masks.append(attn + [0] * pad_len)
            padded_acoustic_input_masks.append(ain_mask + [False] * pad_len)
            padded_acoustic_loss_masks.append(aloss_mask + [False] * pad_len)

        input_ids_tensor = torch.tensor(padded_input_ids, dtype=torch.long)
        attention_mask_tensor = torch.tensor(padded_attention_masks, dtype=torch.long)
        acoustic_input_mask_tensor = torch.tensor(padded_acoustic_input_masks, dtype=torch.bool)
        acoustic_loss_mask_tensor = torch.tensor(padded_acoustic_loss_masks, dtype=torch.bool)

        # ---- Pad speech segments (raw waveforms + latent-frame masks). ----
        if all_speech_waveforms:
            max_wave_len = max(w.shape[0] for w in all_speech_waveforms)
            padded_speeches = np.zeros((len(all_speech_waveforms), max_wave_len), dtype=np.float32)
            for i, w in enumerate(all_speech_waveforms):
                L = w.shape[0]
                padded_speeches[i, :L] = w

            max_latent_len = max(all_speech_latent_lengths) if all_speech_latent_lengths else 1
            speech_masks_np = np.zeros((len(all_speech_waveforms), max_latent_len), dtype=np.bool_)
            for i, L_lat in enumerate(all_speech_latent_lengths):
                speech_masks_np[i, :L_lat] = True

            speech_tensors_tensor = torch.tensor(padded_speeches, dtype=torch.float32)
            speech_masks_tensor = torch.tensor(speech_masks_np, dtype=torch.bool)

            # Diffusion loss applies only to target segments, never voice prompts.
            speeches_loss_input_np = np.zeros_like(speech_masks_np, dtype=np.bool_)
            for i, is_target in enumerate(per_segment_is_target):
                if is_target:
                    speeches_loss_input_np[i] = speech_masks_np[i]
            speeches_loss_input_tensor = torch.tensor(speeches_loss_input_np, dtype=torch.bool)

            # Semantic features
            if self.compute_semantics and hasattr(self.processor, "semantic_tokenizer") and self.processor.semantic_tokenizer is not None:
                sem_feats: List[np.ndarray] = []
                for w in all_speech_waveforms:
                    try:
                        # Expect [T, D] where T ≈ ceil(len(w)/compress_ratio)
                        sem = self.processor.semantic_tokenizer.encode(w)
                        sem = np.asarray(sem, dtype=np.float32)
                    except Exception:
                        # Encoding failed: empty [0, D] array, zero-padded to length below.
                        sem = np.zeros((0, self.semantic_vae_dim), dtype=np.float32)
                    if sem.ndim != 2:
                        raise RuntimeError(f"Semantic tokenizer returned unexpected shape {sem.shape}. Expect [T, D].")
                    L = sem.shape[0]
                    D = sem.shape[1]
                    # Pad or trim the feature dimension to semantic_vae_dim.
                    if D != self.semantic_vae_dim:
                        if D < self.semantic_vae_dim:
                            pad_d = np.zeros((L, self.semantic_vae_dim - D), dtype=np.float32)
                            sem = np.concatenate([sem, pad_d], axis=1)
                        else:
                            sem = sem[:, : self.semantic_vae_dim]
                    # Pad or trim the time dimension to the batch's max latent length.
                    if L < max_latent_len:
                        pad = np.zeros((max_latent_len - L, self.semantic_vae_dim), dtype=np.float32)
                        sem = np.concatenate([sem, pad], axis=0)
                    elif L > max_latent_len:
                        sem = sem[:max_latent_len]
                    sem_feats.append(sem.astype(np.float32))
                speech_semantic_tensors = torch.tensor(np.stack(sem_feats, axis=0), dtype=torch.float32)
            else:
                # Semantic tokenizer unavailable - use zero features as fallback.
                # Useful for preprocessing runs where semantic_tokenizer is not loaded.
                if not self.compute_semantics:
                    # Preprocess mode: all-zero semantic features.
                    sem_feats = [np.zeros((max_latent_len, self.semantic_vae_dim), dtype=np.float32)
                                 for _ in all_speech_waveforms]
                    speech_semantic_tensors = torch.tensor(np.stack(sem_feats, axis=0), dtype=torch.float32)
                else:
                    # Training mode: compute_semantics=True without a tokenizer is fatal.
                    raise RuntimeError(
                        "Semantic features are required but could not be computed. "
                        "Ensure processor.semantic_tokenizer is available or set compute_semantics=False for preprocessing."
                    )
        else:
            speech_tensors_tensor = None
            speech_masks_tensor = None
            speeches_loss_input_tensor = None
            speech_semantic_tensors = None  # No segments in batch

        if self.debug_checks:
            assert (input_ids_tensor >= 0).all(), "input_ids contains negative indices"
            if speech_tensors_tensor is not None:
                assert speech_tensors_tensor.dim() == 2, "Expected speech_tensors 2D [segments, samples]"

        return {
            "input_ids": input_ids_tensor,
            "attention_mask": attention_mask_tensor,
            "speech_tensors": speech_tensors_tensor,
            "speech_masks": speech_masks_tensor,
            "speech_semantic_tensors": speech_semantic_tensors,
            "acoustic_input_mask": acoustic_input_mask_tensor,
            "acoustic_loss_mask": acoustic_loss_mask_tensor,
            "speeches_loss_input": speeches_loss_input_tensor,
        }
|
VibeVoice-finetuning/src/finetune_vibevoice_lora.py
ADDED
|
@@ -0,0 +1,902 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_vibevoice_lora.py
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
from datasets import load_dataset, DatasetDict, VerificationMode
|
| 11 |
+
|
| 12 |
+
from transformers import (
|
| 13 |
+
HfArgumentParser,
|
| 14 |
+
Trainer,
|
| 15 |
+
set_seed,
|
| 16 |
+
TrainerCallback,
|
| 17 |
+
)
|
| 18 |
+
from transformers import TrainingArguments as HfTrainingArguments
|
| 19 |
+
|
| 20 |
+
from peft import LoraConfig, get_peft_model, TaskType
|
| 21 |
+
|
| 22 |
+
from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
|
| 23 |
+
from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
|
| 24 |
+
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
|
| 25 |
+
|
| 26 |
+
from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
# ================== SAMPLE CALLBACK UTILS ==================
|
| 31 |
+
|
| 32 |
+
import copy
|
| 33 |
+
import torch
|
| 34 |
+
from transformers import TrainerCallback
|
| 35 |
+
|
| 36 |
+
class EmaCallback(TrainerCallback):
    """Maintain an exponential moving average (EMA) of one sub-module's weights.

    attr_path: dotted path of the tracked module under the Trainer's model
        (Trainer wraps VibeVoiceForConditionalGeneration), e.g. "model.prediction_head".
    decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt).
    device: where the shadow copy is kept (CPU by default to save GPU memory).

    Bug fixed: the original relied on `on_evaluate_end` / `on_save_end` to
    restore the live weights, but those are not hooks the HF Trainer ever
    invokes — so after the first evaluation or save the EMA weights stayed
    loaded and training continued on (and kept re-averaging) the EMA copy.
    We now also restore the original weights at the start of every training
    step, which makes the swap self-healing regardless of hook coverage.
    NOTE(review): in the HF Trainer, `on_evaluate`/`on_save` fire *after*
    metrics are computed / the checkpoint is written, so the EMA swap may not
    affect those outputs — confirm intended timing against the transformers
    callback documentation.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cpu"):
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None   # EMA copy of the tracked module's state_dict
        self._orig = None    # non-EMA weights stashed while EMA is swapped in

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head".
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Initialize the shadow from the current weights.
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_begin(self, args, state, control, model=None, **kwargs):
        # Safety net: never run a training step on EMA weights. This restores
        # the live weights if a previous swap-in was never undone (the *_end
        # hooks below are not called by current Trainer versions).
        self._swap_back(model)

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        head = self._get_module(model)
        with torch.no_grad():
            # shadow = decay * shadow + (1 - decay) * current
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        if self.shadow is None:
            return  # nothing accumulated yet (e.g. called before training began)
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # Swap EMA in around evaluation; on_step_begin restores the originals.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # Kept for forward compatibility; current Trainer versions never call this.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # Swap EMA in around saving; on_step_begin restores the originals.
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # Kept for forward compatibility; current Trainer versions never call this.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # Final checkpoint: leave the EMA weights in the model.
        self._swap_in_ema(model)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the base model/processor and which parts to train."""

    # Path to VibeVoice base model directory (must contain config.json).
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    # Processor directory (preprocessor_config.json); defaults to the model path.
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    # HF cache directory override.
    cache_dir: Optional[str] = field(default=None)
    # Keep the acoustic/semantic tokenizers frozen during training.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # LoRA hyperparameters (rank / scaling / dropout).
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    # Which LLM sub-modules receive LoRA adapters.
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
|
| 123 |
+
|
| 124 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the dataset, its columns, and split handling."""

    # HF dataset name, or 'json' combined with --train_jsonl for local files.
    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names in the dataset for text / target audio / conditioning prompts.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Presumably the fraction of train held out for eval when no eval split exists — confirm in main().
    eval_split_size: float = field(default=0.0)
    # Skip datasets verification checks on load — confirm mapping to VerificationMode in main().
    ignore_verifications: bool = field(default=False)
    # Max token sequence length forwarded to the collator.
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
|
| 142 |
+
|
| 143 |
+
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice-specific training knobs."""

    # Presumably a multiplier on the diffusion (DDPM) batch per sample — confirm in the training loop.
    ddpm_batch_mul: int = field(default=1)
    # Weights combining the text CE loss and the diffusion loss — confirm usage in compute_loss.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Verbose CE diagnostics: top-k dump, example cap, and logging cadence.
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
|
| 160 |
+
|
| 161 |
+
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the PEFT LoRA config for the LLM blocks from CLI arguments.

    Target modules come from the comma-separated --lora_target_modules string;
    blank entries (e.g. trailing commas) are discarded.
    """
    modules = [name.strip() for name in args.lora_target_modules.split(",")]
    modules = [name for name in modules if name]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
    )
|
| 171 |
+
|
| 172 |
+
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the PEFT LoRA config used when wrapping the diffusion head.

    Targets the head's fixed projection/MLP layer names; reuses the same
    rank/alpha/dropout CLI settings as the LLM LoRA config.
    """
    # Projection and MLP layers inside the diffusion prediction head.
    head_modules = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_modules,
    )
|
| 182 |
+
|
| 183 |
+
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Build next-token CE targets, ignoring padded and acoustic positions.

    Labels are shifted left by one (next-token prediction). A target position
    is replaced with ``pad_id`` (default -100, which cross-entropy ignores)
    when its attention mask is 0 or the predicted token is an acoustic one.
    A missing/empty attention mask means every position is attended.
    """
    targets = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        keep = attention_mask[:, 1:].contiguous().eq(1)
    else:
        keep = torch.ones_like(targets, dtype=torch.bool)
    keep = keep & ~acoustic_input_mask[:, 1:].contiguous()
    return targets.masked_fill(~keep, pad_id)
|
| 191 |
+
|
| 192 |
+
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
|
| 193 |
+
try:
|
| 194 |
+
acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
|
| 195 |
+
if acoustic is None or not hasattr(acoustic, "encode"):
|
| 196 |
+
logger_.warning("No acoustic_tokenizer.encode() found to patch.")
|
| 197 |
+
return
|
| 198 |
+
base_encode = acoustic.encode
|
| 199 |
+
def encode_wrapped(*args, **kwargs):
|
| 200 |
+
out = base_encode(*args, **kwargs)
|
| 201 |
+
try:
|
| 202 |
+
_ = out[0][0]
|
| 203 |
+
return out
|
| 204 |
+
except Exception:
|
| 205 |
+
pass
|
| 206 |
+
if isinstance(out, dict):
|
| 207 |
+
for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 208 |
+
if k in out:
|
| 209 |
+
return [[out[k]]]
|
| 210 |
+
if len(out) > 0:
|
| 211 |
+
return [[next(iter(out.values()))]]
|
| 212 |
+
for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 213 |
+
if hasattr(out, attr):
|
| 214 |
+
return [[getattr(out, attr)]]
|
| 215 |
+
try:
|
| 216 |
+
if isinstance(out, torch.Tensor):
|
| 217 |
+
return [[out]]
|
| 218 |
+
except Exception:
|
| 219 |
+
pass
|
| 220 |
+
return [[out]]
|
| 221 |
+
acoustic.encode = encode_wrapped
|
| 222 |
+
logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
|
| 223 |
+
except Exception as e:
|
| 224 |
+
logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
|
| 225 |
+
|
| 226 |
+
def main() -> None:
    """Entry point: LoRA finetuning of a VibeVoice TTS model.

    Parses model/data/training dataclass args, loads processor + model,
    applies LoRA wrapping and freezing policy, builds datasets/collator,
    trains with a custom Trainer (CE + diffusion loss), and saves LoRA
    adapters plus full diffusion-head/connector state dicts for inference.

    NOTE(review): this body was reconstructed from a whitespace-mangled
    source; indentation was inferred from control flow — verify against
    the raw file where noted below.
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        # Only rank 0 (or single-process) logs at INFO; other ranks stay quiet.
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    set_seed(training_args.seed)

    # Configure gradient clipping
    # Clipping is opt-in: unless --gradient_clipping is passed, max_grad_norm
    # is forced to 0.0 (HF Trainer treats <=0 as "no clipping").
    if not getattr(training_args, "gradient_clipping", False):
        if hasattr(training_args, "max_grad_norm"):
            training_args.max_grad_norm = 0.0
            logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
    else:
        if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
            training_args.max_grad_norm = 1.0
        logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")

    # Load processor
    processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
    if processor_path is None:
        raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
    processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)

    # Required special tokens
    tok = processor.tokenizer
    for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
        if not hasattr(tok, required) or getattr(tok, required) is None:
            raise RuntimeError(f"Tokenizer missing required special id: {required}")

    # Load model
    if model_args.model_name_or_path is None:
        raise ValueError("--model_name_or_path is required to load VibeVoice base model")
    dtype = torch.float32
    if training_args.bf16:
        dtype = torch.bfloat16
    elif getattr(training_args, "fp16", False):
        dtype = torch.float16
    model = VibeVoiceForConditionalGeneration.from_pretrained(
        model_args.model_name_or_path,
        torch_dtype=dtype,
    )
    # Normalize acoustic encode() output shape for legacy [0][0] indexing.
    _patch_acoustic_encode_for_legacy_indexing(model, logger)
    # Collator uses the model's semantic tokenizer (if present) to compute
    # semantic latents on the fly.
    processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)

    # Diagnostics: LM head tie
    try:
        in_emb_mod = model.get_input_embeddings()
        out_emb_mod = model.get_output_embeddings()
        in_w = getattr(in_emb_mod, "weight", None)
        out_w = getattr(out_emb_mod, "weight", None)
        shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
        values_equal = False
        if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
            try:
                values_equal = bool(torch.allclose(in_w, out_w))
            except Exception:
                values_equal = False
        try:
            tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
        except Exception:
            tie_cfg = getattr(model.config, "tie_word_embeddings", None)
        logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
        if out_w is not None:
            logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
    except Exception as e:
        logger.warning(f"LM head tie diagnostics failed: {e}")

    # Hard-tie LM head: share the embedding tensor into the output head when
    # shapes match but the pointers differ (i.e. tying was lost on load).
    try:
        emb_module = model.get_input_embeddings()
        head_module = model.get_output_embeddings()
        if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
            if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
                with torch.no_grad():
                    head_module.weight = emb_module.weight
                logger.info("Force-tied LM head weight to input embeddings (pointer share).")
    except Exception as e:
        logger.warning(f"Force-tie of LM head failed: {e}")

    # Validate special IDs (info logs only)
    try:
        special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
        try:
            vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
        except Exception:
            vocab_size = 0
        in_emb_mod = model.get_input_embeddings()
        out_emb_mod = model.get_output_embeddings()
        in_w = getattr(in_emb_mod, "weight", None)
        out_w = getattr(out_emb_mod, "weight", None)
        for name in special_names:
            val = getattr(tok, name, None)
            exists = (val is not None)
            in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
            equal_row = None
            if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
                try:
                    equal_row = bool(torch.allclose(in_w[val], out_w[val]))
                except Exception:
                    equal_row = False
            decoded_str = None
            if exists and isinstance(val, int):
                try:
                    decoded_str = tok.decode([val])
                except Exception:
                    try:
                        decoded_str = tok.convert_ids_to_tokens(val)
                    except Exception:
                        decoded_str = "<decode_failed>"
            logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
    except Exception as e:
        logger.warning(f"Special token ID/row validation failed: {e}")

    # Quick tokenizer diagnostics (optional)
    try:
        logger.info("=== TOKENIZER DIAGNOSTICS ===")
        logger.info(f"Tokenizer class: {type(tok).__name__}")
        logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
        # tiny CE smoke test
        with torch.no_grad():
            simple_text = "The cat sat on the mat."
            simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
            simple_mask = torch.ones_like(simple_ids)
            x = model.get_input_embeddings()(simple_ids)
            outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
            logits = model.lm_head(outputs.last_hidden_state)
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = simple_ids[:, 1:].contiguous()
            ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
            logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
    except Exception as e:
        logger.warning(f"Tokenizer diagnostics failed: {e}")

    # Disable cache during training
    if hasattr(model.config, "use_cache") and training_args.do_train:
        model.config.use_cache = False

    # Freeze tokenizers
    if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
        for p in model.model.acoustic_tokenizer.parameters():
            p.requires_grad = False
    if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
        for p in model.model.semantic_tokenizer.parameters():
            p.requires_grad = False

    # LoRA wrap LLM (optional) — "none"/"off"/... in lora_target_modules
    # skips wrapping entirely.
    lora_cfg = build_lora_config(model_args)
    tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
    skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
    if not skip_lm_lora:
        model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
    else:
        logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")

    try:
        model.tie_weights()
    except Exception:
        pass

    # Freeze all then enable trainable subsets
    for _, p in model.named_parameters():
        p.requires_grad = False

    try:
        for n, p in model.model.language_model.named_parameters():
            if "lora_A" in n or "lora_B" in n:
                p.requires_grad = True
    except Exception:
        logger.warning("Could not re-enable LoRA params on language_model.")

    # Diffusion head LoRA wrapping (optional)
    if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        class _HeadForwardShim(nn.Module):
            # Adapts the head's positional/keyword calling convention so PEFT
            # can wrap it; forwards (noisy_images, timesteps, condition).
            def __init__(self, base: nn.Module): super().__init__(); self.base = base
            def forward(self, *args, **kwargs):
                if len(args) >= 3:
                    noisy_images, timesteps, condition = args[:3]
                else:
                    noisy_images = kwargs.get("noisy_images")
                    timesteps = kwargs.get("timesteps")
                    condition = kwargs.get("condition")
                return self.base(noisy_images, timesteps, condition)
        try:
            shim = _HeadForwardShim(model.model.prediction_head)
            model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
            for n, p in model.model.prediction_head.named_parameters():
                if "lora_A" in n or "lora_B" in n:
                    p.requires_grad = True
        except Exception as e:
            logger.warning(f"Could not LoRA-wrap diffusion head: {e}")

    # Train full diffusion head (optional)
    if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        for p in model.model.prediction_head.parameters():
            p.requires_grad = True

    # Freeze diffusion head layers (optional) — --layers_to_freeze is a
    # comma-separated list of parameter-group indices (enumeration order of
    # named_parameters), not layer names.
    if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
        head_params = list(model.model.prediction_head.named_parameters())
        try:
            indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
            frozen_count = 0
            for i, (name, param) in enumerate(head_params):
                if i in indices_to_freeze:
                    param.requires_grad = False
                    frozen_count += 1
                    logger.info(f"Froze layer [{i}]: {name}")
            logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
        except Exception as e:
            logger.error(f"Could not parse --layers_to_freeze: {e}")
            raise

    # Connectors
    if getattr(model_args, "train_connectors", False):
        if hasattr(model.model, "acoustic_connector"):
            for p in model.model.acoustic_connector.parameters():
                p.requires_grad = True
        if hasattr(model.model, "semantic_connector"):
            for p in model.model.semantic_connector.parameters():
                p.requires_grad = True
    else:
        if hasattr(model.model, "acoustic_connector"):
            for p in model.model.acoustic_connector.parameters():
                p.requires_grad = False
        if hasattr(model.model, "semantic_connector"):
            for p in model.model.semantic_connector.parameters():
                p.requires_grad = False

    # Freeze embedding + head
    try:
        emb = model.get_input_embeddings()
        if hasattr(emb, "weight"):
            emb.weight.requires_grad_(False)
        head = model.get_output_embeddings()
        if head is not None and hasattr(head, "weight"):
            head.weight.requires_grad_(False)
    except Exception:
        pass

    # Diagnostics
    def _sum_params(named_iter):
        # Count of trainable elements in a named_parameters() iterator.
        return sum(p.numel() for _, p in named_iter if p.requires_grad)
    try:
        lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
        pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
        ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
        se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
        total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
        logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
        logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
    except Exception:
        pass

    # Datasets — either local JSONL files or a HF hub dataset.
    verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
    if data_args.train_jsonl is not None:
        data_files: Dict[str, str] = {"train": data_args.train_jsonl}
        if data_args.validation_jsonl is not None:
            data_files["validation"] = data_args.validation_jsonl
        raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
    else:
        if data_args.dataset_name is None:
            raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
        raw = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            verification_mode=verification_mode,
            cache_dir=model_args.cache_dir,
        )
    train_ds = raw[data_args.train_split_name]
    eval_ds = None
    if training_args.do_eval:
        if data_args.eval_split_name and data_args.eval_split_name in raw:
            eval_ds = raw[data_args.eval_split_name]
        elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
            split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
            train_ds, eval_ds = split["train"], split["test"]

    train_dataset = VibeVoiceDataset(
        train_ds,
        text_column=data_args.text_column_name,
        audio_column=data_args.audio_column_name,
        voice_prompts_column=data_args.voice_prompts_column_name,
    )
    eval_dataset = None
    if eval_ds is not None:
        eval_dataset = VibeVoiceDataset(
            eval_ds,
            text_column=data_args.text_column_name,
            audio_column=data_args.audio_column_name,
            voice_prompts_column=data_args.voice_prompts_column_name,
        )

    # Ratios/dims from processor+model
    speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
    semantic_dim = getattr(model.config, "semantic_vae_dim", None)
    if semantic_dim is None:
        try:
            semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
        except Exception:
            semantic_dim = 128

    compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None

    data_collator = VibeVoiceCollator(
        processor=processor,
        max_length=data_args.max_length,
        speech_compress_ratio=speech_compress_ratio,
        semantic_vae_dim=semantic_dim,
        compute_semantics=compute_semantics_flag,
        debug_checks=False,
        voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
    )

    class LoRADebugCallback(TrainerCallback):
        """Logs whether LoRA A/B matrices exist, are trainable, and change."""

        def __init__(self, log_every_n_steps: int = 50):
            self.log_every_n_steps = max(1, int(log_every_n_steps))
            # param name -> last observed L2 norm, used to detect updates
            self.prev_param_norms: Dict[str, float] = {}
            self.lora_param_names: List[str] = []

        def on_train_begin(self, args, state, control, model=None, **kwargs):
            """Snapshot initial LoRA param norms and sanity-check trainability."""
            try:
                if model is None:
                    return
                named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
                self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
                for n in self.lora_param_names:
                    p = named[n]
                    self.prev_param_norms[n] = float(p.data.norm().item())
                total = len(self.lora_param_names)
                req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
                num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
                num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
                # lora_B is zero-initialized in PEFT, so all-zero at start is expected.
                zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
                logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
                if total == 0:
                    logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
                if req_grad != total:
                    logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
            except Exception as e:
                logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

        def on_step_end(self, args, state, control, model=None, **kwargs):
            """Every N steps, report how many LoRA params actually moved."""
            try:
                if model is None or len(self.lora_param_names) == 0:
                    return
                step = int(getattr(state, "global_step", 0) or 0)
                if step % self.log_every_n_steps != 0 and step != 1:
                    return
                named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
                changed_A = 0
                changed_B = 0
                zero_B = 0
                eps = 1e-12
                for n in self.lora_param_names:
                    p = named.get(n, None)
                    if p is None:
                        continue
                    prev = self.prev_param_norms.get(n, 0.0)
                    curr = float(p.data.norm().item())
                    if "lora_A" in n and abs(curr - prev) > eps:
                        changed_A += 1
                    if "lora_B" in n:
                        if abs(curr - prev) > eps:
                            changed_B += 1
                        if curr == 0.0:
                            zero_B += 1
                    self.prev_param_norms[n] = curr
                total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
                total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
                logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
            except Exception as e:
                logger.warning(f"LoRA debug (on_step_end) failed: {e}")

    class VibeVoiceTrainer(Trainer):
        """Trainer with a combined CE + diffusion loss and custom adapter saving."""

        def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
            """Weighted sum of text CE (non-acoustic tokens) and diffusion loss."""
            labels = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask")
            acoustic_input_mask = inputs.get("acoustic_input_mask")

            # Ensure semantic tensors exist and have correct dtype/device
            sem = inputs.get("speech_semantic_tensors", None)
            try:
                target_dtype = next(model.model.semantic_connector.parameters()).dtype
            except Exception:
                target_dtype = model.get_input_embeddings().weight.dtype

            if sem is None:
                sm = inputs.get("speech_masks")
                if sm is not None:
                    # No semantic latents supplied: substitute zeros shaped like
                    # (batch, frames, semantic_vae_dim).
                    zeros = torch.zeros(
                        sm.size(0), sm.size(1),
                        getattr(model.config, "semantic_vae_dim", 128),
                        dtype=target_dtype,
                        device=sm.device,
                    )
                    inputs["speech_semantic_tensors"] = zeros
            else:
                if isinstance(sem, torch.Tensor):
                    inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

            outputs = model(
                input_ids=inputs.get("input_ids"),
                attention_mask=attention_mask,
                speech_tensors=inputs.get("speech_tensors"),
                speech_masks=inputs.get("speech_masks"),
                speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
                acoustic_input_mask=acoustic_input_mask,
                acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
                speeches_loss_input=inputs.get("speeches_loss_input"),
                ddpm_batch_mul=training_args.ddpm_batch_mul,
            )

            # Invariants: token/latent selection equality across views (warn, don't assert)
            try:
                al_mask = inputs.get("acoustic_loss_mask")
                sp_masks = inputs.get("speech_masks")
                sp_loss_sel = inputs.get("speeches_loss_input")
                num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
                num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
                num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
                num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
                self.log({
                    "debug/num_tok_total": float(num_tok_total),
                    "debug/num_tok_loss": float(num_tok_loss),
                    "debug/num_lat_total": float(num_lat_total),
                    "debug/num_lat_loss": float(num_lat_loss),
                })
                if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                    if num_tok_loss != num_lat_loss:
                        logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
            except Exception:
                pass

            # CE Loss
            logits = outputs.logits
            ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
            shift_logits = logits[:, :-1, :].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

            # Optional CE diagnostics
            try:
                self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
            except Exception as e:
                logger.warning(f"Failed invoking CE debug: {e}")

            # Diffusion loss
            diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
            total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

            # Logs
            try:
                prefix = "train" if model.training else "eval"
                self.log({
                    f"{prefix}/ce_loss": ce_loss.detach().item(),
                    f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
                })
                if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                    lr_val = self.optimizer.param_groups[0].get("lr", None)
                    if lr_val is not None:
                        self.log({"train/learning_rate_real": float(lr_val)})
            except Exception:
                pass

            return (total, outputs) if return_outputs else total

        def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
            """Periodically log per-token / per-example CE when --debug_ce_details."""
            try:
                if not getattr(training_args, "debug_ce_details", False):
                    return
                step = int(getattr(self.state, "global_step", 0) or 0)
                every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
                if not (step <= 1 or (step % every_n == 0)):
                    return

                with torch.no_grad():
                    vocab = shift_logits.size(-1)
                    per_token_loss = F.cross_entropy(
                        shift_logits.view(-1, vocab),
                        ce_labels.view(-1),
                        reduction="none",
                        ignore_index=-100,
                    ).view_as(ce_labels)

                    valid_mask = ce_labels.ne(-100)
                    num_valid = int(valid_mask.sum().item())
                    avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                    per_ex_avgs = []
                    max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                    B = ce_labels.size(0)
                    for b in range(min(B, max_examples)):
                        vb = valid_mask[b]
                        if int(vb.sum().item()) > 0:
                            per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                        else:
                            per_ex_avgs.append(float("nan"))
                    # x==x filters NaN (NaN != NaN) when rounding for display.
                    logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
            except Exception as e:
                logger.warning(f"CE detailed debug failed: {e}")

        # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------

        def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
            """Save LoRA adapters plus full diffusion-head/connector weights.

            NOTE(review): this override replaces (does not extend) the stock
            Trainer._save, so base model/config/tokenizer are NOT written here.
            """
            try:
                target_dir = output_dir or self.args.output_dir
                lora_out = os.path.join(target_dir, "lora")
                os.makedirs(lora_out, exist_ok=True)

                # --- LLM PEFT adapters (if LoRA-wrapped) ---
                language_model = getattr(self.model.model, "language_model", None)
                if hasattr(language_model, "save_pretrained"):
                    language_model.save_pretrained(lora_out)

                # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
                pred_head = getattr(self.model.model, "prediction_head", None)
                if hasattr(pred_head, "save_pretrained"):
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    pred_head.save_pretrained(ph_dir)

                # --- ALWAYS save FULL diffusion head state_dict for fallback ---
                if pred_head is not None and hasattr(pred_head, "state_dict"):
                    sd = pred_head.state_dict()
                    torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

                # --- Connectors (plain state_dicts) ---
                ac = getattr(self.model.model, "acoustic_connector", None)
                if ac is not None:
                    ac_dir = os.path.join(lora_out, "acoustic_connector")
                    os.makedirs(ac_dir, exist_ok=True)
                    torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

                se = getattr(self.model.model, "semantic_connector", None)
                if se is not None:
                    se_dir = os.path.join(lora_out, "semantic_connector")
                    os.makedirs(se_dir, exist_ok=True)
                    torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

            except Exception as e:
                logger.warning(f"Failed to save LoRA assets: {e}")

    # ------------- Build the Trainer -------------

    # EMA of the diffusion head, tracked on CPU to save device memory.
    ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cpu")

    trainer = VibeVoiceTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
    )

    # Optional debug pre-training save
    if getattr(training_args, "debug_save", False):
        try:
            debug_dir = os.path.join(training_args.output_dir, "debug_initial")
            lora_out = os.path.join(debug_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)
            logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
            # language model adapters / base
            try:
                if hasattr(model.model.language_model, "save_pretrained"):
                    model.model.language_model.save_pretrained(lora_out)
            except Exception as e_lm:
                logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
            # diffusion head
            try:
                if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
                    model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
            except Exception as e_head:
                logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
            # NEW: full diffusion head state_dict as fallback
            try:
                ph = getattr(model.model, "prediction_head", None)
                if ph is not None and hasattr(ph, "state_dict"):
                    sd = ph.state_dict()
                    torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                    os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
                    torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
            except Exception as e:
                logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
            # connectors
            try:
                ac_conn = getattr(model.model, "acoustic_connector", None)
                if ac_conn is not None:
                    ac_dir = os.path.join(lora_out, "acoustic_connector")
                    os.makedirs(ac_dir, exist_ok=True)
                    torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
            except Exception as e_ac:
                logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
            try:
                se_conn = getattr(model.model, "semantic_connector", None)
                if se_conn is not None:
                    se_dir = os.path.join(lora_out, "semantic_connector")
                    os.makedirs(se_dir, exist_ok=True)
                    torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
            except Exception as e_se:
                logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
        except Exception as e:
            logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")

    if getattr(training_args, "gradient_checkpointing", False):
        try:
            model.gradient_checkpointing_enable()
        except Exception:
            logger.warning("Failed to enable gradient checkpointing on the model.")

    if training_args.do_train:
        trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)

    # Final export of adapters + full fallbacks, mirroring Trainer._save.
    # NOTE(review): indentation reconstructed — confirm whether this export
    # block is nested under `if training_args.do_train:` in the raw file.
    lora_out = os.path.join(training_args.output_dir, "lora")
    os.makedirs(lora_out, exist_ok=True)

    # LLM PEFT (if any)
    lm = getattr(model.model, "language_model", None)
    if hasattr(lm, "save_pretrained"):
        lm.save_pretrained(lora_out)

    # Diffusion head PEFT (if any)
    ph = getattr(model.model, "prediction_head", None)
    if hasattr(ph, "save_pretrained"):
        ph_dir = os.path.join(lora_out, "diffusion_head")
        os.makedirs(ph_dir, exist_ok=True)
        ph.save_pretrained(ph_dir)

    # ALWAYS: full diffusion head state_dict fallback
    try:
        if ph is not None and hasattr(ph, "state_dict"):
            sd = ph.state_dict()
            torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
            ph_dir = os.path.join(lora_out, "diffusion_head")
            os.makedirs(ph_dir, exist_ok=True)
            torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
    except Exception as e:
        logger.warning(f"Failed to save FULL diffusion head at end: {e}")

    # Connectors (if trained)
    try:
        ac = getattr(model.model, "acoustic_connector", None)
        if ac is not None:
            ac_dir = os.path.join(lora_out, "acoustic_connector")
            os.makedirs(ac_dir, exist_ok=True)
            torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
    except Exception as e:
        logger.warning(f"Failed to save acoustic_connector: {e}")

    try:
        se = getattr(model.model, "semantic_connector", None)
        if se is not None:
            se_dir = os.path.join(lora_out, "semantic_connector")
            os.makedirs(se_dir, exist_ok=True)
            torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
    except Exception as e:
        logger.warning(f"Failed to save semantic_connector: {e}")

    if training_args.do_eval and eval_dataset is not None:
        trainer.evaluate()


if __name__ == "__main__":
    main()
|
VibeVoice-finetuning/src/finetune_vibevoice_lora0.py
ADDED
|
@@ -0,0 +1,984 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_vibevoice_lora.py
|
| 2 |
+
import os
|
| 3 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
|
| 4 |
+
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
from dataclasses import dataclass, field
|
| 9 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from datasets import load_dataset, DatasetDict, VerificationMode
|
| 15 |
+
|
| 16 |
+
from transformers import (
|
| 17 |
+
HfArgumentParser,
|
| 18 |
+
Trainer,
|
| 19 |
+
set_seed,
|
| 20 |
+
TrainerCallback,
|
| 21 |
+
)
|
| 22 |
+
from transformers import TrainingArguments as HfTrainingArguments
|
| 23 |
+
|
| 24 |
+
from peft import LoraConfig, get_peft_model, TaskType
|
| 25 |
+
|
| 26 |
+
from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
|
| 27 |
+
from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
|
| 28 |
+
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
|
| 29 |
+
|
| 30 |
+
from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
|
| 31 |
+
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
# ================== SAMPLE CALLBACK UTILS ==================
|
| 35 |
+
|
| 36 |
+
import copy
|
| 37 |
+
import torch
|
| 38 |
+
from transformers import TrainerCallback
|
| 39 |
+
|
| 40 |
+
class EmaCallback(TrainerCallback):
    """Maintain an exponential moving average (EMA) of one sub-module's weights.

    The EMA shadow is updated after every optimizer step. Around evaluation
    and saving, the EMA weights are swapped into the live model and the raw
    weights are stashed; they are restored before the next training step.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None   # EMA copy of the tracked module's state_dict
        self._orig = None    # raw (non-EMA) weights stashed while EMA is swapped in

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Seed the shadow with a detached copy of the current weights.
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_begin(self, args, state, control, model=None, **kwargs):
        # BUG FIX: transformers' Trainer never dispatches `on_evaluate_end` /
        # `on_save_end` (they are not callback events), so the swap-back below
        # never fired and training silently continued on EMA weights after any
        # mid-training eval/save. Restore the stashed raw weights (if any)
        # before the next training step instead; no-op when nothing is stashed.
        self._swap_back(model)

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None: return
        head = self._get_module(model)
        with torch.no_grad():
            # shadow = decay * shadow + (1 - decay) * current
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None: return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # NOTE(review): Trainer fires `on_evaluate` AFTER metrics are computed,
        # so this swap does not influence the metrics themselves — confirm the
        # intended timing against the transformers callback documentation.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # Kept for interface compatibility; not a real Trainer hook (never called).
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # NOTE(review): `on_save` fires after the checkpoint has been written,
        # so the checkpoint on disk holds raw weights, not EMA — confirm intent.
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # Kept for interface compatibility; not a real Trainer hook (never called).
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@dataclass
class ModelArguments:
    """CLI arguments controlling model loading, freezing, and LoRA setup."""

    # ---- Paths ----
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to VibeVoice base model with config.json"},
    )
    processor_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."},
    )
    cache_dir: Optional[str] = field(default=None)

    # ---- Freezing toggles ----
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)

    # ---- LoRA hyper-parameters ----
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(
        default=False,
        metadata={"help": "Wrap diffusion head with PEFT LoRA"},
    )

    # ---- Full fine-tune toggles ----
    train_diffusion_head: bool = field(
        default=False,
        metadata={"help": "Train diffusion prediction head (full fine-tune)"},
    )
    train_connectors: bool = field(
        default=False,
        metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"},
    )
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."},
    )
|
| 127 |
+
|
| 128 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training/eval dataset layout."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"},
    )
    dataset_config_name: Optional[str] = field(default=None)
    # Split names used when loading from a HF dataset.
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names inside each dataset row.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of data carved off for evaluation (0 disables the split).
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(
        default=None,
        metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"},
    )
    validation_jsonl: Optional[str] = field(
        default=None,
        metadata={"help": "Optional path to local validation JSONL"},
    )
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
|
| 146 |
+
|
| 147 |
+
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice-specific training knobs."""

    # Multiplier on the diffusion (DDPM) sub-batch per training step.
    # NOTE(review): exact consumer lives in the training loop — confirm there.
    ddpm_batch_mul: int = field(default=1)
    # Weight of the token cross-entropy term in the combined loss.
    ce_loss_weight: float = field(default=1.0)
    # Weight of the diffusion term in the combined loss.
    diffusion_loss_weight: float = field(default=1.0)
    # CE debugging: when enabled, dump top-k predictions for a few examples
    # every N steps (controlled by the three fields below).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
|
| 164 |
+
|
| 165 |
+
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA configuration applied to the language-model blocks.

    Target modules come from the comma-separated ``lora_target_modules``
    argument; blank entries are discarded.
    """
    modules = [name.strip() for name in args.lora_target_modules.split(",") if name.strip()]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
    )
|
| 175 |
+
|
| 176 |
+
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA configuration for the diffusion prediction head.

    Uses a fixed list of projection-layer names inside the head; rank,
    alpha, and dropout are shared with the LLM LoRA settings.
    """
    head_modules = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_modules,
    )
|
| 186 |
+
|
| 187 |
+
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Shift labels for next-token CE and mask out padded/acoustic targets.

    Targets that fall on padding (attention_mask == 0) or that are acoustic
    tokens are replaced with ``pad_id`` so cross-entropy ignores them.
    Returns a tensor of shape (batch, seq_len - 1).
    """
    targets = labels[:, 1:].contiguous()
    # With no usable attention mask, every position counts as valid.
    if attention_mask is not None and attention_mask.numel() > 0:
        valid = attention_mask[:, 1:].contiguous().eq(1)
    else:
        valid = torch.ones_like(targets, dtype=torch.bool)
    acoustic_targets = acoustic_input_mask[:, 1:].contiguous()
    keep = valid & ~acoustic_targets
    masked = targets.clone()
    masked[~keep] = pad_id
    return masked
|
| 195 |
+
|
| 196 |
+
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
|
| 197 |
+
try:
|
| 198 |
+
acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
|
| 199 |
+
if acoustic is None or not hasattr(acoustic, "encode"):
|
| 200 |
+
logger_.warning("No acoustic_tokenizer.encode() found to patch.")
|
| 201 |
+
return
|
| 202 |
+
base_encode = acoustic.encode
|
| 203 |
+
def encode_wrapped(*args, **kwargs):
|
| 204 |
+
out = base_encode(*args, **kwargs)
|
| 205 |
+
try:
|
| 206 |
+
_ = out[0][0]
|
| 207 |
+
return out
|
| 208 |
+
except Exception:
|
| 209 |
+
pass
|
| 210 |
+
if isinstance(out, dict):
|
| 211 |
+
for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 212 |
+
if k in out:
|
| 213 |
+
return [[out[k]]]
|
| 214 |
+
if len(out) > 0:
|
| 215 |
+
return [[next(iter(out.values()))]]
|
| 216 |
+
for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 217 |
+
if hasattr(out, attr):
|
| 218 |
+
return [[getattr(out, attr)]]
|
| 219 |
+
try:
|
| 220 |
+
if isinstance(out, torch.Tensor):
|
| 221 |
+
return [[out]]
|
| 222 |
+
except Exception:
|
| 223 |
+
pass
|
| 224 |
+
return [[out]]
|
| 225 |
+
acoustic.encode = encode_wrapped
|
| 226 |
+
logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
|
| 227 |
+
except Exception as e:
|
| 228 |
+
logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
|
| 229 |
+
|
| 230 |
+
def main() -> None:
|
| 231 |
+
parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
|
| 232 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 233 |
+
|
| 234 |
+
logging.basicConfig(
|
| 235 |
+
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
| 236 |
+
datefmt="%m/%d/%Y %H:%M:%S",
|
| 237 |
+
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
|
| 238 |
+
)
|
| 239 |
+
logger.info("Training/evaluation parameters %s", training_args)
|
| 240 |
+
set_seed(training_args.seed)
|
| 241 |
+
|
| 242 |
+
# Configure gradient clipping
|
| 243 |
+
if not getattr(training_args, "gradient_clipping", False):
|
| 244 |
+
if hasattr(training_args, "max_grad_norm"):
|
| 245 |
+
training_args.max_grad_norm = 0.0
|
| 246 |
+
logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
|
| 247 |
+
else:
|
| 248 |
+
if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
|
| 249 |
+
training_args.max_grad_norm = 1.0
|
| 250 |
+
logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
|
| 251 |
+
|
| 252 |
+
# Load processor
|
| 253 |
+
processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
|
| 254 |
+
if processor_path is None:
|
| 255 |
+
raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
|
| 256 |
+
processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
|
| 257 |
+
|
| 258 |
+
# Required special tokens
|
| 259 |
+
tok = processor.tokenizer
|
| 260 |
+
for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
|
| 261 |
+
if not hasattr(tok, required) or getattr(tok, required) is None:
|
| 262 |
+
raise RuntimeError(f"Tokenizer missing required special id: {required}")
|
| 263 |
+
|
| 264 |
+
# Load model
|
| 265 |
+
if model_args.model_name_or_path is None:
|
| 266 |
+
raise ValueError("--model_name_or_path is required to load VibeVoice base model")
|
| 267 |
+
dtype = torch.float32
|
| 268 |
+
if training_args.bf16:
|
| 269 |
+
dtype = torch.bfloat16
|
| 270 |
+
elif getattr(training_args, "fp16", False):
|
| 271 |
+
dtype = torch.float16
|
| 272 |
+
model = VibeVoiceForConditionalGeneration.from_pretrained(
|
| 273 |
+
model_args.model_name_or_path,
|
| 274 |
+
torch_dtype=dtype, device_map={"": 0},
|
| 275 |
+
)
|
| 276 |
+
_patch_acoustic_encode_for_legacy_indexing(model, logger)
|
| 277 |
+
processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
|
| 278 |
+
|
| 279 |
+
# Diagnostics: LM head tie
|
| 280 |
+
try:
|
| 281 |
+
in_emb_mod = model.get_input_embeddings()
|
| 282 |
+
out_emb_mod = model.get_output_embeddings()
|
| 283 |
+
in_w = getattr(in_emb_mod, "weight", None)
|
| 284 |
+
out_w = getattr(out_emb_mod, "weight", None)
|
| 285 |
+
shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
|
| 286 |
+
values_equal = False
|
| 287 |
+
if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
|
| 288 |
+
try:
|
| 289 |
+
values_equal = bool(torch.allclose(in_w, out_w))
|
| 290 |
+
except Exception:
|
| 291 |
+
values_equal = False
|
| 292 |
+
try:
|
| 293 |
+
tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
|
| 294 |
+
except Exception:
|
| 295 |
+
tie_cfg = getattr(model.config, "tie_word_embeddings", None)
|
| 296 |
+
logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
|
| 297 |
+
if out_w is not None:
|
| 298 |
+
logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
|
| 299 |
+
except Exception as e:
|
| 300 |
+
logger.warning(f"LM head tie diagnostics failed: {e}")
|
| 301 |
+
|
| 302 |
+
# Hard-tie LM head
|
| 303 |
+
try:
|
| 304 |
+
emb_module = model.get_input_embeddings()
|
| 305 |
+
head_module = model.get_output_embeddings()
|
| 306 |
+
if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
|
| 307 |
+
if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
|
| 308 |
+
with torch.no_grad():
|
| 309 |
+
head_module.weight = emb_module.weight
|
| 310 |
+
logger.info("Force-tied LM head weight to input embeddings (pointer share).")
|
| 311 |
+
except Exception as e:
|
| 312 |
+
logger.warning(f"Force-tie of LM head failed: {e}")
|
| 313 |
+
|
| 314 |
+
# Validate special IDs (info logs only)
|
| 315 |
+
try:
|
| 316 |
+
special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
|
| 317 |
+
try:
|
| 318 |
+
vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
|
| 319 |
+
except Exception:
|
| 320 |
+
vocab_size = 0
|
| 321 |
+
in_emb_mod = model.get_input_embeddings()
|
| 322 |
+
out_emb_mod = model.get_output_embeddings()
|
| 323 |
+
in_w = getattr(in_emb_mod, "weight", None)
|
| 324 |
+
out_w = getattr(out_emb_mod, "weight", None)
|
| 325 |
+
for name in special_names:
|
| 326 |
+
val = getattr(tok, name, None)
|
| 327 |
+
exists = (val is not None)
|
| 328 |
+
in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
|
| 329 |
+
equal_row = None
|
| 330 |
+
if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
|
| 331 |
+
try:
|
| 332 |
+
equal_row = bool(torch.allclose(in_w[val], out_w[val]))
|
| 333 |
+
except Exception:
|
| 334 |
+
equal_row = False
|
| 335 |
+
decoded_str = None
|
| 336 |
+
if exists and isinstance(val, int):
|
| 337 |
+
try:
|
| 338 |
+
decoded_str = tok.decode([val])
|
| 339 |
+
except Exception:
|
| 340 |
+
try:
|
| 341 |
+
decoded_str = tok.convert_ids_to_tokens(val)
|
| 342 |
+
except Exception:
|
| 343 |
+
decoded_str = "<decode_failed>"
|
| 344 |
+
logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
|
| 345 |
+
except Exception as e:
|
| 346 |
+
logger.warning(f"Special token ID/row validation failed: {e}")
|
| 347 |
+
|
| 348 |
+
# Quick tokenizer diagnostics (optional)
|
| 349 |
+
try:
|
| 350 |
+
logger.info("=== TOKENIZER DIAGNOSTICS ===")
|
| 351 |
+
logger.info(f"Tokenizer class: {type(tok).__name__}")
|
| 352 |
+
logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
|
| 353 |
+
# tiny CE smoke test
|
| 354 |
+
with torch.no_grad():
|
| 355 |
+
simple_text = "The cat sat on the mat."
|
| 356 |
+
simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
|
| 357 |
+
simple_mask = torch.ones_like(simple_ids)
|
| 358 |
+
x = model.get_input_embeddings()(simple_ids)
|
| 359 |
+
outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
|
| 360 |
+
logits = model.lm_head(outputs.last_hidden_state)
|
| 361 |
+
shift_logits = logits[:, :-1, :].contiguous()
|
| 362 |
+
shift_labels = simple_ids[:, 1:].contiguous()
|
| 363 |
+
ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
|
| 364 |
+
logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
|
| 365 |
+
except Exception as e:
|
| 366 |
+
logger.warning(f"Tokenizer diagnostics failed: {e}")
|
| 367 |
+
|
| 368 |
+
# Disable cache during training
|
| 369 |
+
if hasattr(model.config, "use_cache") and training_args.do_train:
|
| 370 |
+
model.config.use_cache = False
|
| 371 |
+
|
| 372 |
+
# Freeze tokenizers
|
| 373 |
+
if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
|
| 374 |
+
for p in model.model.acoustic_tokenizer.parameters():
|
| 375 |
+
p.requires_grad = False
|
| 376 |
+
if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
|
| 377 |
+
for p in model.model.semantic_tokenizer.parameters():
|
| 378 |
+
p.requires_grad = False
|
| 379 |
+
|
| 380 |
+
# LoRA wrap LLM (optional)
|
| 381 |
+
lora_cfg = build_lora_config(model_args)
|
| 382 |
+
tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
|
| 383 |
+
skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
|
| 384 |
+
if not skip_lm_lora:
|
| 385 |
+
model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
|
| 386 |
+
else:
|
| 387 |
+
logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
|
| 388 |
+
|
| 389 |
+
try:
|
| 390 |
+
model.tie_weights()
|
| 391 |
+
except Exception:
|
| 392 |
+
pass
|
| 393 |
+
|
| 394 |
+
# Freeze all then enable trainable subsets
|
| 395 |
+
for _, p in model.named_parameters():
|
| 396 |
+
p.requires_grad = False
|
| 397 |
+
|
| 398 |
+
try:
|
| 399 |
+
for n, p in model.model.language_model.named_parameters():
|
| 400 |
+
if "lora_A" in n or "lora_B" in n:
|
| 401 |
+
p.requires_grad = True
|
| 402 |
+
except Exception:
|
| 403 |
+
logger.warning("Could not re-enable LoRA params on language_model.")
|
| 404 |
+
|
| 405 |
+
# Diffusion head LoRA wrapping (optional)
|
| 406 |
+
if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
|
| 407 |
+
class _HeadForwardShim(nn.Module):
|
| 408 |
+
def __init__(self, base: nn.Module): super().__init__(); self.base = base
|
| 409 |
+
def forward(self, *args, **kwargs):
|
| 410 |
+
if len(args) >= 3:
|
| 411 |
+
noisy_images, timesteps, condition = args[:3]
|
| 412 |
+
else:
|
| 413 |
+
noisy_images = kwargs.get("noisy_images")
|
| 414 |
+
timesteps = kwargs.get("timesteps")
|
| 415 |
+
condition = kwargs.get("condition")
|
| 416 |
+
return self.base(noisy_images, timesteps, condition)
|
| 417 |
+
try:
|
| 418 |
+
shim = _HeadForwardShim(model.model.prediction_head)
|
| 419 |
+
model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
|
| 420 |
+
for n, p in model.model.prediction_head.named_parameters():
|
| 421 |
+
if "lora_A" in n or "lora_B" in n:
|
| 422 |
+
p.requires_grad = True
|
| 423 |
+
except Exception as e:
|
| 424 |
+
logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
|
| 425 |
+
|
| 426 |
+
# Train full diffusion head (optional)
|
| 427 |
+
if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
|
| 428 |
+
for p in model.model.prediction_head.parameters():
|
| 429 |
+
p.requires_grad = True
|
| 430 |
+
|
| 431 |
+
# Freeze diffusion head layers (optional)
|
| 432 |
+
if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
|
| 433 |
+
head_params = list(model.model.prediction_head.named_parameters())
|
| 434 |
+
try:
|
| 435 |
+
indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
|
| 436 |
+
frozen_count = 0
|
| 437 |
+
for i, (name, param) in enumerate(head_params):
|
| 438 |
+
if i in indices_to_freeze:
|
| 439 |
+
param.requires_grad = False
|
| 440 |
+
frozen_count += 1
|
| 441 |
+
logger.info(f"Froze layer [{i}]: {name}")
|
| 442 |
+
logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
|
| 443 |
+
except Exception as e:
|
| 444 |
+
logger.error(f"Could not parse --layers_to_freeze: {e}")
|
| 445 |
+
raise
|
| 446 |
+
|
| 447 |
+
# Connectors
|
| 448 |
+
if getattr(model_args, "train_connectors", False):
|
| 449 |
+
if hasattr(model.model, "acoustic_connector"):
|
| 450 |
+
for p in model.model.acoustic_connector.parameters():
|
| 451 |
+
p.requires_grad = True
|
| 452 |
+
if hasattr(model.model, "semantic_connector"):
|
| 453 |
+
for p in model.model.semantic_connector.parameters():
|
| 454 |
+
p.requires_grad = True
|
| 455 |
+
else:
|
| 456 |
+
if hasattr(model.model, "acoustic_connector"):
|
| 457 |
+
for p in model.model.acoustic_connector.parameters():
|
| 458 |
+
p.requires_grad = False
|
| 459 |
+
if hasattr(model.model, "semantic_connector"):
|
| 460 |
+
for p in model.model.semantic_connector.parameters():
|
| 461 |
+
p.requires_grad = False
|
| 462 |
+
|
| 463 |
+
# Freeze embedding + head
|
| 464 |
+
try:
|
| 465 |
+
emb = model.get_input_embeddings()
|
| 466 |
+
if hasattr(emb, "weight"):
|
| 467 |
+
emb.weight.requires_grad_(False)
|
| 468 |
+
head = model.get_output_embeddings()
|
| 469 |
+
if head is not None and hasattr(head, "weight"):
|
| 470 |
+
head.weight.requires_grad_(False)
|
| 471 |
+
except Exception:
|
| 472 |
+
pass
|
| 473 |
+
|
| 474 |
+
# Diagnostics
|
| 475 |
+
def _sum_params(named_iter):
|
| 476 |
+
return sum(p.numel() for _, p in named_iter if p.requires_grad)
|
| 477 |
+
try:
|
| 478 |
+
lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
|
| 479 |
+
pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
|
| 480 |
+
ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
|
| 481 |
+
se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
|
| 482 |
+
total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
|
| 483 |
+
logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
|
| 484 |
+
logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
|
| 485 |
+
except Exception:
|
| 486 |
+
pass
|
| 487 |
+
|
| 488 |
+
# Preprocessed data classes
|
| 489 |
+
class PreprocessedBatchDataset:
|
| 490 |
+
def __init__(self, preprocessed_file: str):
|
| 491 |
+
self.data = torch.load(preprocessed_file, map_location='cpu')
|
| 492 |
+
logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")
|
| 493 |
+
|
| 494 |
+
def __len__(self):
|
| 495 |
+
return len(self.data)
|
| 496 |
+
|
| 497 |
+
def __getitem__(self, idx):
|
| 498 |
+
batch = self.data[idx]
|
| 499 |
+
result = {}
|
| 500 |
+
for k, v in batch.items():
|
| 501 |
+
if isinstance(v, torch.Tensor):
|
| 502 |
+
result[k] = v
|
| 503 |
+
else:
|
| 504 |
+
result[k] = v
|
| 505 |
+
return result
|
| 506 |
+
|
| 507 |
+
class PreprocessedBatchSubset:
|
| 508 |
+
def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
|
| 509 |
+
self.dataset = dataset
|
| 510 |
+
self.indices = indices
|
| 511 |
+
|
| 512 |
+
def __len__(self):
|
| 513 |
+
return len(self.indices)
|
| 514 |
+
|
| 515 |
+
def __getitem__(self, idx):
|
| 516 |
+
actual_idx = self.indices[idx]
|
| 517 |
+
return self.dataset[actual_idx]
|
| 518 |
+
|
| 519 |
+
class PreprocessedBatchCollator:
    """Merge pre-collated batch dicts into one batch.

    For each key present in the first item: tensor values are concatenated along
    dim 0; non-tensor values fall back to the first non-None entry; a key whose
    values are all None maps to None.
    """

    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        merged = {}
        # Keys are taken from the first item; all items are assumed to share them.
        for key in batch[0]:
            values = [item[key] for item in batch if item[key] is not None]
            if not values:
                merged[key] = None
            elif isinstance(values[0], torch.Tensor):
                # Pre-collated batches already carry a leading batch dim, so
                # concatenation (not stacking) is the right merge.
                merged[key] = torch.cat(values, dim=0)
            else:
                # Non-tensor metadata: keep the first available value.
                merged[key] = values[0]
        return merged
|
| 532 |
+
# Datasets
|
| 533 |
+
preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
|
| 534 |
+
preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
|
| 535 |
+
|
| 536 |
+
if os.path.exists(preprocessed_file):
|
| 537 |
+
logger.info(f"Loading preprocessed data from {preprocessed_file}")
|
| 538 |
+
preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
|
| 539 |
+
|
| 540 |
+
train_dataset = preprocessed_data
|
| 541 |
+
eval_dataset = None
|
| 542 |
+
|
| 543 |
+
if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
|
| 544 |
+
num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
|
| 545 |
+
num_train = len(preprocessed_data) - num_eval
|
| 546 |
+
indices = list(range(len(preprocessed_data)))
|
| 547 |
+
import random
|
| 548 |
+
random.Random(training_args.seed).shuffle(indices)
|
| 549 |
+
train_indices = indices[:num_train]
|
| 550 |
+
eval_indices = indices[num_train:]
|
| 551 |
+
train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
|
| 552 |
+
eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
|
| 553 |
+
else:
|
| 554 |
+
logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
|
| 555 |
+
verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
|
| 556 |
+
if data_args.train_jsonl is not None:
|
| 557 |
+
data_files: Dict[str, str] = {"train": data_args.train_jsonl}
|
| 558 |
+
if data_args.validation_jsonl is not None:
|
| 559 |
+
data_files["validation"] = data_args.validation_jsonl
|
| 560 |
+
raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
|
| 561 |
+
else:
|
| 562 |
+
if data_args.dataset_name is None:
|
| 563 |
+
raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
|
| 564 |
+
raw = load_dataset(
|
| 565 |
+
data_args.dataset_name,
|
| 566 |
+
data_args.dataset_config_name,
|
| 567 |
+
verification_mode=verification_mode,
|
| 568 |
+
cache_dir=model_args.cache_dir,
|
| 569 |
+
)
|
| 570 |
+
train_ds = raw[data_args.train_split_name]
|
| 571 |
+
eval_ds = None
|
| 572 |
+
if training_args.do_eval:
|
| 573 |
+
if data_args.eval_split_name and data_args.eval_split_name in raw:
|
| 574 |
+
eval_ds = raw[data_args.eval_split_name]
|
| 575 |
+
elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
|
| 576 |
+
split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
|
| 577 |
+
train_ds, eval_ds = split["train"], split["test"]
|
| 578 |
+
|
| 579 |
+
train_dataset = VibeVoiceDataset(
|
| 580 |
+
train_ds,
|
| 581 |
+
text_column=data_args.text_column_name,
|
| 582 |
+
audio_column=data_args.audio_column_name,
|
| 583 |
+
voice_prompts_column=data_args.voice_prompts_column_name,
|
| 584 |
+
)
|
| 585 |
+
eval_dataset = None
|
| 586 |
+
if eval_ds is not None:
|
| 587 |
+
eval_dataset = VibeVoiceDataset(
|
| 588 |
+
eval_ds,
|
| 589 |
+
text_column=data_args.text_column_name,
|
| 590 |
+
audio_column=data_args.audio_column_name,
|
| 591 |
+
voice_prompts_column=data_args.voice_prompts_column_name,
|
| 592 |
+
)
|
| 593 |
+
|
| 594 |
+
# Ratios/dims from processor+model
|
| 595 |
+
speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
|
| 596 |
+
semantic_dim = getattr(model.config, "semantic_vae_dim", None)
|
| 597 |
+
if semantic_dim is None:
|
| 598 |
+
try:
|
| 599 |
+
semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
|
| 600 |
+
except Exception:
|
| 601 |
+
semantic_dim = 128
|
| 602 |
+
|
| 603 |
+
compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
|
| 604 |
+
|
| 605 |
+
if os.path.exists(preprocessed_file):
|
| 606 |
+
data_collator = PreprocessedBatchCollator()
|
| 607 |
+
else:
|
| 608 |
+
data_collator = VibeVoiceCollator(
|
| 609 |
+
processor=processor,
|
| 610 |
+
max_length=data_args.max_length,
|
| 611 |
+
speech_compress_ratio=speech_compress_ratio,
|
| 612 |
+
semantic_vae_dim=semantic_dim,
|
| 613 |
+
compute_semantics=compute_semantics_flag,
|
| 614 |
+
debug_checks=False,
|
| 615 |
+
voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
|
| 616 |
+
)
|
| 617 |
+
|
| 618 |
+
class LoRADebugCallback(TrainerCallback):
    """Trainer callback that tracks LoRA A/B parameter norms across steps.

    At train begin it records the initial norm of every parameter whose name
    contains ``lora_A`` or ``lora_B``; on selected steps it logs how many of
    those norms changed, which is a cheap proxy for "the adapters are actually
    receiving gradient updates". All failures are swallowed with a warning so
    debugging never interrupts training.
    """

    def __init__(self, log_every_n_steps: int = 50):
        # Clamp to >= 1 so the modulo check below never divides by zero.
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        # Last observed L2 norm per LoRA parameter name.
        self.prev_param_norms: Dict[str, float] = {}
        # Names of LoRA params, discovered on on_train_begin.
        self.lora_param_names: List[str] = []

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        """Discover LoRA params, snapshot their norms, and log a summary."""
        try:
            if model is None:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            # PEFT names LoRA factors with "lora_A" / "lora_B" substrings.
            self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
            for n in self.lora_param_names:
                p = named[n]
                self.prev_param_norms[n] = float(p.data.norm().item())
            total = len(self.lora_param_names)
            req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
            num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            # lora_B is conventionally zero-initialized, so zero_B == num_B is
            # expected at step 0; it should shrink as training progresses.
            zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        """On step 1 and every Nth step, count LoRA params whose norm moved."""
        try:
            if model is None or len(self.lora_param_names) == 0:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Always log the very first step so a completely stalled run is
            # visible immediately; otherwise honor the logging cadence.
            if step % self.log_every_n_steps != 0 and step != 1:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            changed_A = 0
            changed_B = 0
            zero_B = 0
            eps = 1e-12
            for n in self.lora_param_names:
                p = named.get(n, None)
                if p is None:
                    continue
                prev = self.prev_param_norms.get(n, 0.0)
                curr = float(p.data.norm().item())
                # A norm delta above eps counts as "this factor was updated".
                if "lora_A" in n and abs(curr - prev) > eps:
                    changed_A += 1
                if "lora_B" in n:
                    if abs(curr - prev) > eps:
                        changed_B += 1
                    if curr == 0.0:
                        zero_B += 1
                # Roll the snapshot forward so the next check compares against
                # this step, not train start.
                self.prev_param_norms[n] = curr
            total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
| 678 |
+
class VibeVoiceTrainer(Trainer):
    """Trainer for VibeVoice fine-tuning.

    Combines a token cross-entropy loss (acoustic positions excluded via
    ``mask_for_ce``) with the model's diffusion loss, weighted by
    ``training_args.ce_loss_weight`` / ``training_args.diffusion_loss_weight``.
    Note: ``training_args``, ``mask_for_ce``, ``logger``, ``nn`` and ``F`` are
    closure/module-level names from the enclosing script, not attributes of
    this class.
    """

    def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
        """Return the weighted CE + diffusion loss for one batch.

        Overrides Trainer.compute_loss. Labels for the CE term are derived from
        the batch's own ``input_ids`` (causal LM style).
        """
        labels = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        acoustic_input_mask = inputs.get("acoustic_input_mask")

        # Ensure semantic tensors exist and have correct dtype/device
        sem = inputs.get("speech_semantic_tensors", None)
        try:
            # Match the semantic connector's parameter dtype (e.g. under AMP).
            target_dtype = next(model.model.semantic_connector.parameters()).dtype
        except Exception:
            # Fallback: use the embedding table's dtype if the connector is absent.
            target_dtype = model.get_input_embeddings().weight.dtype

        if sem is None:
            sm = inputs.get("speech_masks")
            if sm is not None:
                # No semantics provided: substitute an all-zero tensor shaped
                # like (batch, frames, semantic_vae_dim) so the forward pass
                # still receives a well-formed input.
                zeros = torch.zeros(
                    sm.size(0), sm.size(1),
                    getattr(model.config, "semantic_vae_dim", 128),
                    dtype=target_dtype,
                    device=sm.device,
                )
                inputs["speech_semantic_tensors"] = zeros
        else:
            if isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

        outputs = model(
            input_ids=inputs.get("input_ids"),
            attention_mask=attention_mask,
            speech_tensors=inputs.get("speech_tensors"),
            speech_masks=inputs.get("speech_masks"),
            speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
            acoustic_input_mask=acoustic_input_mask,
            acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
            speeches_loss_input=inputs.get("speeches_loss_input"),
            ddpm_batch_mul=training_args.ddpm_batch_mul,
        )

        # Invariants: token/latent selection equality across views (warn, don't assert)
        try:
            al_mask = inputs.get("acoustic_loss_mask")
            sp_masks = inputs.get("speech_masks")
            sp_loss_sel = inputs.get("speeches_loss_input")
            # Counts of acoustic token positions vs. speech latent frames; the
            # loss-selected counts should agree between the two views.
            num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
            num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
            num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
            num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
            self.log({
                "debug/num_tok_total": float(num_tok_total),
                "debug/num_tok_loss": float(num_tok_loss),
                "debug/num_lat_total": float(num_lat_total),
                "debug/num_lat_loss": float(num_lat_loss),
            })
            if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                if num_tok_loss != num_lat_loss:
                    logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
        except Exception:
            pass

        # CE Loss
        logits = outputs.logits
        # NOTE(review): shift_logits drops the last timestep but ce_labels is
        # used unshifted below — this only lines up if mask_for_ce already
        # returns labels aligned to next-token prediction. Verify mask_for_ce.
        ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
        shift_logits = logits[:, :-1, :].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
        ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

        # Optional CE diagnostics
        try:
            self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
        except Exception as e:
            logger.warning(f"Failed invoking CE debug: {e}")

        # Diffusion loss
        # Zero tensor keeps the weighted sum well-defined when the model
        # returned no diffusion loss for this batch.
        diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
        total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

        # Logs
        try:
            prefix = "train" if model.training else "eval"
            self.log({
                f"{prefix}/ce_loss": ce_loss.detach().item(),
                f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
            })
            # Log the optimizer's actual LR, which can differ from the
            # scheduler's reported value.
            if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                lr_val = self.optimizer.param_groups[0].get("lr", None)
                if lr_val is not None:
                    self.log({"train/learning_rate_real": float(lr_val)})
        except Exception:
            pass

        return (total, outputs) if return_outputs else total

    def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
        """Periodically log per-token and per-example CE statistics.

        Gated by training_args.debug_ce_details and throttled to every
        ``debug_ce_every_n_steps`` steps (plus steps 0/1). Never raises.
        """
        try:
            if not getattr(training_args, "debug_ce_details", False):
                return
            step = int(getattr(self.state, "global_step", 0) or 0)
            every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
            if not (step <= 1 or (step % every_n == 0)):
                return

            with torch.no_grad():
                vocab = shift_logits.size(-1)
                # Unreduced CE so we can aggregate per example; -100 positions
                # contribute zero and are excluded via valid_mask below.
                per_token_loss = F.cross_entropy(
                    shift_logits.view(-1, vocab),
                    ce_labels.view(-1),
                    reduction="none",
                    ignore_index=-100,
                ).view_as(ce_labels)

                valid_mask = ce_labels.ne(-100)
                num_valid = int(valid_mask.sum().item())
                avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                per_ex_avgs = []
                max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                B = ce_labels.size(0)
                for b in range(min(B, max_examples)):
                    vb = valid_mask[b]
                    if int(vb.sum().item()) > 0:
                        per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                    else:
                        per_ex_avgs.append(float("nan"))
                # x==x filters NaN (NaN != NaN) so logs show None instead.
                logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
        except Exception as e:
            logger.warning(f"CE detailed debug failed: {e}")

    # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------

    def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
        """Save LoRA adapters plus full diffusion-head/connector weights.

        Overrides Trainer._save. NOTE(review): super()._save is never called,
        so the standard HF checkpoint artifacts are not written here — confirm
        that is intentional.
        """
        try:
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)

            # --- LLM PEFT adapters (if LoRA-wrapped) ---
            language_model = getattr(self.model.model, "language_model", None)
            if hasattr(language_model, "save_pretrained"):
                language_model.save_pretrained(lora_out)

            # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
            pred_head = getattr(self.model.model, "prediction_head", None)
            if hasattr(pred_head, "save_pretrained"):
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                pred_head.save_pretrained(ph_dir)

            # --- ALWAYS save FULL diffusion head state_dict for fallback ---
            # Written to both lora/ and lora/diffusion_head/ so either layout
            # works at inference time.
            if pred_head is not None and hasattr(pred_head, "state_dict"):
                sd = pred_head.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

            # --- Connectors (plain state_dicts) ---
            ac = getattr(self.model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

            se = getattr(self.model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

        except Exception as e:
            logger.warning(f"Failed to save LoRA assets: {e}")
|
| 851 |
+
|
| 852 |
+
# ------------- Build the Trainer -------------
|
| 853 |
+
|
| 854 |
+
# Resolve which adapters to apply in samples
|
| 855 |
+
|
| 856 |
+
ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
|
| 857 |
+
|
| 858 |
+
# --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
|
| 859 |
+
# This prevents 'ValueError: Attempting to unscale FP16 gradients'
|
| 860 |
+
if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
|
| 861 |
+
print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
|
| 862 |
+
for name, param in model.named_parameters():
|
| 863 |
+
if param.requires_grad:
|
| 864 |
+
param.data = param.data.to(torch.float32)
|
| 865 |
+
# ---------------------------------------------------
|
| 866 |
+
|
| 867 |
+
trainer = VibeVoiceTrainer(
|
| 868 |
+
model=model,
|
| 869 |
+
args=training_args,
|
| 870 |
+
train_dataset=train_dataset,
|
| 871 |
+
eval_dataset=eval_dataset,
|
| 872 |
+
data_collator=data_collator,
|
| 873 |
+
callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
|
| 874 |
+
)
|
| 875 |
+
|
| 876 |
+
# Optional debug pre-training save
|
| 877 |
+
if getattr(training_args, "debug_save", False):
|
| 878 |
+
try:
|
| 879 |
+
debug_dir = os.path.join(training_args.output_dir, "debug_initial")
|
| 880 |
+
lora_out = os.path.join(debug_dir, "lora")
|
| 881 |
+
os.makedirs(lora_out, exist_ok=True)
|
| 882 |
+
logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
|
| 883 |
+
# language model adapters / base
|
| 884 |
+
try:
|
| 885 |
+
if hasattr(model.model.language_model, "save_pretrained"):
|
| 886 |
+
model.model.language_model.save_pretrained(lora_out)
|
| 887 |
+
except Exception as e_lm:
|
| 888 |
+
logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
|
| 889 |
+
# diffusion head
|
| 890 |
+
try:
|
| 891 |
+
if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
|
| 892 |
+
model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
|
| 893 |
+
except Exception as e_head:
|
| 894 |
+
logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
|
| 895 |
+
# NEW: full diffusion head state_dict as fallback
|
| 896 |
+
try:
|
| 897 |
+
ph = getattr(model.model, "prediction_head", None)
|
| 898 |
+
if ph is not None and hasattr(ph, "state_dict"):
|
| 899 |
+
sd = ph.state_dict()
|
| 900 |
+
torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
|
| 901 |
+
os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
|
| 902 |
+
torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
|
| 903 |
+
except Exception as e:
|
| 904 |
+
logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
|
| 905 |
+
# connectors
|
| 906 |
+
try:
|
| 907 |
+
ac_conn = getattr(model.model, "acoustic_connector", None)
|
| 908 |
+
if ac_conn is not None:
|
| 909 |
+
ac_dir = os.path.join(lora_out, "acoustic_connector")
|
| 910 |
+
os.makedirs(ac_dir, exist_ok=True)
|
| 911 |
+
torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
|
| 912 |
+
except Exception as e_ac:
|
| 913 |
+
logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
|
| 914 |
+
try:
|
| 915 |
+
se_conn = getattr(model.model, "semantic_connector", None)
|
| 916 |
+
if se_conn is not None:
|
| 917 |
+
se_dir = os.path.join(lora_out, "semantic_connector")
|
| 918 |
+
os.makedirs(se_dir, exist_ok=True)
|
| 919 |
+
torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
|
| 920 |
+
except Exception as e_se:
|
| 921 |
+
logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
|
| 922 |
+
except Exception as e:
|
| 923 |
+
logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
|
| 924 |
+
|
| 925 |
+
if getattr(training_args, "gradient_checkpointing", False):
|
| 926 |
+
try:
|
| 927 |
+
model.gradient_checkpointing_enable()
|
| 928 |
+
except Exception:
|
| 929 |
+
logger.warning("Failed to enable gradient checkpointing on the model.")
|
| 930 |
+
|
| 931 |
+
if training_args.do_train:
|
| 932 |
+
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
|
| 933 |
+
|
| 934 |
+
lora_out = os.path.join(training_args.output_dir, "lora")
|
| 935 |
+
os.makedirs(lora_out, exist_ok=True)
|
| 936 |
+
|
| 937 |
+
# LLM PEFT (if any)
|
| 938 |
+
lm = getattr(model.model, "language_model", None)
|
| 939 |
+
if hasattr(lm, "save_pretrained"):
|
| 940 |
+
lm.save_pretrained(lora_out)
|
| 941 |
+
|
| 942 |
+
# Diffusion head PEFT (if any)
|
| 943 |
+
ph = getattr(model.model, "prediction_head", None)
|
| 944 |
+
if hasattr(ph, "save_pretrained"):
|
| 945 |
+
ph_dir = os.path.join(lora_out, "diffusion_head")
|
| 946 |
+
os.makedirs(ph_dir, exist_ok=True)
|
| 947 |
+
ph.save_pretrained(ph_dir)
|
| 948 |
+
|
| 949 |
+
# ALWAYS: full diffusion head state_dict fallback
|
| 950 |
+
try:
|
| 951 |
+
if ph is not None and hasattr(ph, "state_dict"):
|
| 952 |
+
sd = ph.state_dict()
|
| 953 |
+
torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
|
| 954 |
+
ph_dir = os.path.join(lora_out, "diffusion_head")
|
| 955 |
+
os.makedirs(ph_dir, exist_ok=True)
|
| 956 |
+
torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
|
| 957 |
+
except Exception as e:
|
| 958 |
+
logger.warning(f"Failed to save FULL diffusion head at end: {e}")
|
| 959 |
+
|
| 960 |
+
# Connectors (if trained)
|
| 961 |
+
try:
|
| 962 |
+
ac = getattr(model.model, "acoustic_connector", None)
|
| 963 |
+
if ac is not None:
|
| 964 |
+
ac_dir = os.path.join(lora_out, "acoustic_connector")
|
| 965 |
+
os.makedirs(ac_dir, exist_ok=True)
|
| 966 |
+
torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
|
| 967 |
+
except Exception as e:
|
| 968 |
+
logger.warning(f"Failed to save acoustic_connector: {e}")
|
| 969 |
+
|
| 970 |
+
try:
|
| 971 |
+
se = getattr(model.model, "semantic_connector", None)
|
| 972 |
+
if se is not None:
|
| 973 |
+
se_dir = os.path.join(lora_out, "semantic_connector")
|
| 974 |
+
os.makedirs(se_dir, exist_ok=True)
|
| 975 |
+
torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
|
| 976 |
+
except Exception as e:
|
| 977 |
+
logger.warning(f"Failed to save semantic_connector: {e}")
|
| 978 |
+
|
| 979 |
+
if training_args.do_eval and eval_dataset is not None:
|
| 980 |
+
trainer.evaluate()
|
| 981 |
+
|
| 982 |
+
|
| 983 |
+
if __name__ == "__main__":
|
| 984 |
+
main()
|
VibeVoice-finetuning/src/vibevoice/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_1.5b_64k.json
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_attn_implementation_autoset": true,
|
| 3 |
+
"acoustic_vae_dim": 64,
|
| 4 |
+
"acoustic_tokenizer_config": {
|
| 5 |
+
"causal": true,
|
| 6 |
+
"channels": 1,
|
| 7 |
+
"conv_bias": true,
|
| 8 |
+
"conv_norm": "none",
|
| 9 |
+
"corpus_normalize": 0.0,
|
| 10 |
+
"decoder_depths": null,
|
| 11 |
+
"decoder_n_filters": 32,
|
| 12 |
+
"decoder_ratios": [
|
| 13 |
+
8,
|
| 14 |
+
5,
|
| 15 |
+
5,
|
| 16 |
+
4,
|
| 17 |
+
2,
|
| 18 |
+
2
|
| 19 |
+
],
|
| 20 |
+
"disable_last_norm": true,
|
| 21 |
+
"encoder_depths": "3-3-3-3-3-3-8",
|
| 22 |
+
"encoder_n_filters": 32,
|
| 23 |
+
"encoder_ratios": [
|
| 24 |
+
8,
|
| 25 |
+
5,
|
| 26 |
+
5,
|
| 27 |
+
4,
|
| 28 |
+
2,
|
| 29 |
+
2
|
| 30 |
+
],
|
| 31 |
+
"fix_std": 0.5,
|
| 32 |
+
"layer_scale_init_value": 1e-06,
|
| 33 |
+
"layernorm": "RMSNorm",
|
| 34 |
+
"layernorm_elementwise_affine": true,
|
| 35 |
+
"layernorm_eps": 1e-05,
|
| 36 |
+
"mixer_layer": "depthwise_conv",
|
| 37 |
+
"model_type": "vibepod_acoustic_tokenizer",
|
| 38 |
+
"pad_mode": "constant",
|
| 39 |
+
"std_dist_type": "gaussian",
|
| 40 |
+
"vae_dim": 64,
|
| 41 |
+
"weight_init_value": 0.01
|
| 42 |
+
},
|
| 43 |
+
"decoder_config": {
|
| 44 |
+
"attention_dropout": 0.0,
|
| 45 |
+
"hidden_act": "silu",
|
| 46 |
+
"hidden_size": 1536,
|
| 47 |
+
"initializer_range": 0.02,
|
| 48 |
+
"intermediate_size": 8960,
|
| 49 |
+
"max_position_embeddings": 65536,
|
| 50 |
+
"max_window_layers": 28,
|
| 51 |
+
"model_type": "qwen2",
|
| 52 |
+
"num_attention_heads": 12,
|
| 53 |
+
"num_hidden_layers": 28,
|
| 54 |
+
"num_key_value_heads": 2,
|
| 55 |
+
"rms_norm_eps": 1e-06,
|
| 56 |
+
"rope_scaling": null,
|
| 57 |
+
"rope_theta": 1000000.0,
|
| 58 |
+
"sliding_window": null,
|
| 59 |
+
"tie_word_embeddings": true,
|
| 60 |
+
"torch_dtype": "bfloat16",
|
| 61 |
+
"use_cache": true,
|
| 62 |
+
"use_sliding_window": false,
|
| 63 |
+
"vocab_size": 151936
|
| 64 |
+
},
|
| 65 |
+
"diffusion_head_config": {
|
| 66 |
+
"ddpm_batch_mul": 4,
|
| 67 |
+
"ddpm_beta_schedule": "cosine",
|
| 68 |
+
"ddpm_num_inference_steps": 20,
|
| 69 |
+
"ddpm_num_steps": 1000,
|
| 70 |
+
"diffusion_type": "ddpm",
|
| 71 |
+
"head_ffn_ratio": 3.0,
|
| 72 |
+
"head_layers": 4,
|
| 73 |
+
"hidden_size": 1536,
|
| 74 |
+
"latent_size": 64,
|
| 75 |
+
"model_type": "vibepod_diffusion_head",
|
| 76 |
+
"prediction_type": "v_prediction",
|
| 77 |
+
"rms_norm_eps": 1e-05,
|
| 78 |
+
"speech_vae_dim": 64
|
| 79 |
+
},
|
| 80 |
+
"model_type": "vibepod",
|
| 81 |
+
"semantic_tokenizer_config": {
|
| 82 |
+
"causal": true,
|
| 83 |
+
"channels": 1,
|
| 84 |
+
"conv_bias": true,
|
| 85 |
+
"conv_norm": "none",
|
| 86 |
+
"corpus_normalize": 0.0,
|
| 87 |
+
"disable_last_norm": true,
|
| 88 |
+
"encoder_depths": "3-3-3-3-3-3-8",
|
| 89 |
+
"encoder_n_filters": 32,
|
| 90 |
+
"encoder_ratios": [
|
| 91 |
+
8,
|
| 92 |
+
5,
|
| 93 |
+
5,
|
| 94 |
+
4,
|
| 95 |
+
2,
|
| 96 |
+
2
|
| 97 |
+
],
|
| 98 |
+
"fix_std": 0,
|
| 99 |
+
"layer_scale_init_value": 1e-06,
|
| 100 |
+
"layernorm": "RMSNorm",
|
| 101 |
+
"layernorm_elementwise_affine": true,
|
| 102 |
+
"layernorm_eps": 1e-05,
|
| 103 |
+
"mixer_layer": "depthwise_conv",
|
| 104 |
+
"model_type": "vibepod_semantic_tokenizer",
|
| 105 |
+
"pad_mode": "constant",
|
| 106 |
+
"std_dist_type": "none",
|
| 107 |
+
"vae_dim": 128,
|
| 108 |
+
"weight_init_value": 0.01
|
| 109 |
+
},
|
| 110 |
+
"semantic_vae_dim": 128,
|
| 111 |
+
"torch_dtype": "bfloat16"
|
| 112 |
+
}
|
VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_7b_32k.json
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_attn_implementation_autoset": true,
|
| 3 |
+
"acoustic_vae_dim": 64,
|
| 4 |
+
"acoustic_tokenizer_config": {
|
| 5 |
+
"causal": true,
|
| 6 |
+
"channels": 1,
|
| 7 |
+
"conv_bias": true,
|
| 8 |
+
"conv_norm": "none",
|
| 9 |
+
"corpus_normalize": 0.0,
|
| 10 |
+
"decoder_depths": null,
|
| 11 |
+
"decoder_n_filters": 32,
|
| 12 |
+
"decoder_ratios": [
|
| 13 |
+
8,
|
| 14 |
+
5,
|
| 15 |
+
5,
|
| 16 |
+
4,
|
| 17 |
+
2,
|
| 18 |
+
2
|
| 19 |
+
],
|
| 20 |
+
"disable_last_norm": true,
|
| 21 |
+
"encoder_depths": "3-3-3-3-3-3-8",
|
| 22 |
+
"encoder_n_filters": 32,
|
| 23 |
+
"encoder_ratios": [
|
| 24 |
+
8,
|
| 25 |
+
5,
|
| 26 |
+
5,
|
| 27 |
+
4,
|
| 28 |
+
2,
|
| 29 |
+
2
|
| 30 |
+
],
|
| 31 |
+
"fix_std": 0.5,
|
| 32 |
+
"layer_scale_init_value": 1e-06,
|
| 33 |
+
"layernorm": "RMSNorm",
|
| 34 |
+
"layernorm_elementwise_affine": true,
|
| 35 |
+
"layernorm_eps": 1e-05,
|
| 36 |
+
"mixer_layer": "depthwise_conv",
|
| 37 |
+
"model_type": "vibepod_acoustic_tokenizer",
|
| 38 |
+
"pad_mode": "constant",
|
| 39 |
+
"std_dist_type": "gaussian",
|
| 40 |
+
"vae_dim": 64,
|
| 41 |
+
"weight_init_value": 0.01
|
| 42 |
+
},
|
| 43 |
+
"decoder_config": {
|
| 44 |
+
"attention_dropout": 0.0,
|
| 45 |
+
"hidden_act": "silu",
|
| 46 |
+
"hidden_size": 3584,
|
| 47 |
+
"initializer_range": 0.02,
|
| 48 |
+
"intermediate_size": 18944,
|
| 49 |
+
"max_position_embeddings": 32768,
|
| 50 |
+
"max_window_layers": 28,
|
| 51 |
+
"model_type": "qwen2",
|
| 52 |
+
"num_attention_heads": 28,
|
| 53 |
+
"num_hidden_layers": 28,
|
| 54 |
+
"num_key_value_heads": 4,
|
| 55 |
+
"rms_norm_eps": 1e-06,
|
| 56 |
+
"rope_theta": 1000000.0,
|
| 57 |
+
"sliding_window": null,
|
| 58 |
+
"tie_word_embeddings": false,
|
| 59 |
+
"torch_dtype": "bfloat16",
|
| 60 |
+
"transformers_version": "4.40.1",
|
| 61 |
+
"use_cache": true,
|
| 62 |
+
"use_mrope": false,
|
| 63 |
+
"use_sliding_window": false,
|
| 64 |
+
"vocab_size": 152064
|
| 65 |
+
},
|
| 66 |
+
"diffusion_head_config": {
|
| 67 |
+
"ddpm_batch_mul": 4,
|
| 68 |
+
"ddpm_beta_schedule": "cosine",
|
| 69 |
+
"ddpm_num_inference_steps": 20,
|
| 70 |
+
"ddpm_num_steps": 1000,
|
| 71 |
+
"diffusion_type": "ddpm",
|
| 72 |
+
"head_ffn_ratio": 3.0,
|
| 73 |
+
"head_layers": 4,
|
| 74 |
+
"hidden_size": 3584,
|
| 75 |
+
"latent_size": 64,
|
| 76 |
+
"model_type": "vibepod_diffusion_head",
|
| 77 |
+
"prediction_type": "v_prediction",
|
| 78 |
+
"rms_norm_eps": 1e-05,
|
| 79 |
+
"speech_vae_dim": 64
|
| 80 |
+
},
|
| 81 |
+
"model_type": "vibepod",
|
| 82 |
+
"semantic_tokenizer_config": {
|
| 83 |
+
"causal": true,
|
| 84 |
+
"channels": 1,
|
| 85 |
+
"conv_bias": true,
|
| 86 |
+
"conv_norm": "none",
|
| 87 |
+
"corpus_normalize": 0.0,
|
| 88 |
+
"disable_last_norm": true,
|
| 89 |
+
"encoder_depths": "3-3-3-3-3-3-8",
|
| 90 |
+
"encoder_n_filters": 32,
|
| 91 |
+
"encoder_ratios": [
|
| 92 |
+
8,
|
| 93 |
+
5,
|
| 94 |
+
5,
|
| 95 |
+
4,
|
| 96 |
+
2,
|
| 97 |
+
2
|
| 98 |
+
],
|
| 99 |
+
"fix_std": 0,
|
| 100 |
+
"layer_scale_init_value": 1e-06,
|
| 101 |
+
"layernorm": "RMSNorm",
|
| 102 |
+
"layernorm_elementwise_affine": true,
|
| 103 |
+
"layernorm_eps": 1e-05,
|
| 104 |
+
"mixer_layer": "depthwise_conv",
|
| 105 |
+
"model_type": "vibepod_semantic_tokenizer",
|
| 106 |
+
"pad_mode": "constant",
|
| 107 |
+
"std_dist_type": "none",
|
| 108 |
+
"vae_dim": 128,
|
| 109 |
+
"weight_init_value": 0.01
|
| 110 |
+
},
|
| 111 |
+
"semantic_vae_dim": 128,
|
| 112 |
+
"torch_dtype": "bfloat16"
|
| 113 |
+
}
|
VibeVoice-finetuning/src/vibevoice/modular/__init__.py
ADDED
|
File without changes
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (167 Bytes). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc
ADDED
|
Binary file (9.25 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc
ADDED
|
Binary file (8.32 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc
ADDED
|
Binary file (27.4 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc
ADDED
|
Binary file (7.66 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc
ADDED
|
Binary file (65.4 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc
ADDED
|
Binary file (60.9 kB). View file
|
|
|
VibeVoice-finetuning/src/vibevoice/modular/configuration_vibevoice.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" VibeVoice_AcousticTokenizer model configuration"""
|
| 2 |
+
|
| 3 |
+
from typing import Dict, List, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 6 |
+
from transformers.utils import logging
|
| 7 |
+
|
| 8 |
+
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
|
| 9 |
+
|
| 10 |
+
logger = logging.get_logger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class VibeVoiceAcousticTokenizerConfig(PretrainedConfig):
    """Configuration for the VibeVoice acoustic tokenizer.

    Describes a causal convolutional VAE-style encoder/decoder operating on raw
    audio.  ``encoder_ratios`` lists the per-stage resampling factors and
    ``encoder_depths`` the per-stage block counts (dash-separated); the decoder
    mirrors the encoder when its own settings are left as ``None``.

    Note: ``encoder_ratios`` previously used a mutable list as its default
    value, which Python shares across *all* instances; it is now a ``None``
    sentinel resolved to a fresh ``[8, 5, 5, 4, 2, 2]`` per instance.
    """

    model_type = "vibevoice_acoustic_tokenizer"

    def __init__(
        self,
        channels: int = 1,
        corpus_normalize: float = 0.0,
        causal: bool = True,
        vae_dim: int = 64,
        fix_std: float = 0.5,
        std_dist_type: str = 'gaussian',
        # common
        mixer_layer: str = 'depthwise_conv',
        conv_norm: str = 'none',
        pad_mode: str = 'constant',
        disable_last_norm: bool = True,
        layernorm: str = 'RMSNorm',
        layernorm_eps: float = 1e-5,
        layernorm_elementwise_affine: bool = True,
        conv_bias: bool = True,
        layer_scale_init_value: float = 1e-6,
        weight_init_value: float = 1e-2,
        # encoder specific
        encoder_n_filters: int = 32,
        encoder_ratios: Optional[List[int]] = None,  # None -> [8, 5, 5, 4, 2, 2]
        encoder_depths: str = "3-3-3-3-3-3-8",
        # decoder specific
        decoder_n_filters: int = 32,
        decoder_ratios: Optional[List[int]] = None,  # if None, same as encoder
        decoder_depths: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.channels = channels
        self.corpus_normalize = corpus_normalize
        self.causal = causal
        self.vae_dim = vae_dim
        self.fix_std = fix_std
        self.std_dist_type = std_dist_type

        # common parameters
        self.conv_norm = conv_norm
        self.pad_mode = pad_mode
        self.layernorm_eps = layernorm_eps
        self.disable_last_norm = disable_last_norm
        self.layernorm = layernorm
        self.layernorm_elementwise_affine = layernorm_elementwise_affine
        self.conv_bias = conv_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.weight_init_value = weight_init_value
        self.mixer_layer = mixer_layer

        # encoder specific parameters; build the default list per-instance so
        # mutating one config can never leak into another.
        self.encoder_n_filters = encoder_n_filters
        self.encoder_ratios = encoder_ratios if encoder_ratios is not None else [8, 5, 5, 4, 2, 2]
        self.encoder_depths = encoder_depths

        # decoder specific parameters (ratios fall back to the encoder's)
        self.decoder_ratios = decoder_ratios if decoder_ratios is not None else self.encoder_ratios
        self.decoder_n_filters = decoder_n_filters
        self.decoder_depths = decoder_depths
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class VibeVoiceSemanticTokenizerConfig(PretrainedConfig):
    """Configuration for the VibeVoice semantic tokenizer.

    Encoder-only counterpart of the acoustic tokenizer: a causal convolutional
    encoder producing a ``vae_dim``-sized latent with no sampled std
    (``fix_std=0``, ``std_dist_type='none'``).

    Note: ``encoder_ratios`` previously used a mutable list default shared by
    every instance; it is now a ``None`` sentinel resolved to a fresh
    ``[8, 5, 5, 4, 2, 2]`` per instance.
    """

    model_type = "vibevoice_semantic_tokenizer"

    def __init__(
        self,
        channels: int = 1,
        corpus_normalize: float = 0.0,
        causal: bool = True,
        vae_dim: int = 64,
        fix_std: float = 0,
        std_dist_type: str = 'none',
        # common
        mixer_layer: str = 'depthwise_conv',
        conv_norm: str = 'none',
        pad_mode: str = 'constant',
        disable_last_norm: bool = True,
        layernorm: str = 'RMSNorm',
        layernorm_eps: float = 1e-5,
        layernorm_elementwise_affine: bool = True,
        conv_bias: bool = True,
        layer_scale_init_value: float = 1e-6,
        weight_init_value: float = 1e-2,
        # encoder specific
        encoder_n_filters: int = 32,
        encoder_ratios: Optional[List[int]] = None,  # None -> [8, 5, 5, 4, 2, 2]
        encoder_depths: str = "3-3-3-3-3-3-8",
        **kwargs
    ):
        super().__init__(**kwargs)
        self.channels = channels
        self.corpus_normalize = corpus_normalize
        self.causal = causal
        self.vae_dim = vae_dim
        self.fix_std = fix_std
        self.std_dist_type = std_dist_type

        # common parameters
        self.conv_norm = conv_norm
        self.pad_mode = pad_mode
        self.layernorm_eps = layernorm_eps
        self.disable_last_norm = disable_last_norm
        self.layernorm = layernorm
        self.layernorm_elementwise_affine = layernorm_elementwise_affine
        self.conv_bias = conv_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.weight_init_value = weight_init_value
        self.mixer_layer = mixer_layer

        # encoder specific parameters; per-instance default avoids the shared
        # mutable-default pitfall.
        self.encoder_n_filters = encoder_n_filters
        self.encoder_ratios = encoder_ratios if encoder_ratios is not None else [8, 5, 5, 4, 2, 2]
        self.encoder_depths = encoder_depths
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class VibeVoiceDiffusionHeadConfig(PretrainedConfig):
    """Configuration for the VibeVoice diffusion prediction head.

    A small transformer head that denoises speech latents with a DDPM-style
    schedule.  ``speech_vae_dim`` is optional and, when ``None``, is expected
    to be filled in from the tokenizer config by the composite model.
    """

    model_type = "vibevoice_diffusion_head"

    def __init__(
        self,
        hidden_size=768,
        head_layers=4,
        head_ffn_ratio=3.0,
        rms_norm_eps=1e-5,
        latent_size=64,
        speech_vae_dim=None,
        prediction_type="v_prediction",
        diffusion_type="ddpm",
        ddpm_num_steps=1000,
        ddpm_num_inference_steps=20,
        ddpm_beta_schedule="cosine",
        ddpm_batch_mul=4,
        **kwargs
    ):
        # Head architecture.
        self.hidden_size = hidden_size
        self.head_layers = head_layers
        self.head_ffn_ratio = head_ffn_ratio
        self.rms_norm_eps = rms_norm_eps

        # Latent interface.
        self.latent_size = latent_size
        self.speech_vae_dim = speech_vae_dim

        # Diffusion process settings.
        self.prediction_type = prediction_type
        self.diffusion_type = diffusion_type
        self.ddpm_num_steps = ddpm_num_steps
        self.ddpm_num_inference_steps = ddpm_num_inference_steps
        self.ddpm_beta_schedule = ddpm_beta_schedule
        self.ddpm_batch_mul = ddpm_batch_mul

        super().__init__(**kwargs)
|
| 163 |
+
|
| 164 |
+
class VibeVoiceConfig(PretrainedConfig):
    """Composite configuration for the full VibeVoice model.

    Bundles four sub-configs: acoustic tokenizer, semantic tokenizer, Qwen2
    language-model decoder, and diffusion head.  Each may be supplied as
    ``None`` (defaults), a ``dict`` (instantiated, with ``model_type`` forced
    to the expected value), or an already-built config instance.  Any other
    type now raises ``TypeError`` instead of silently leaving the attribute
    unset (which previously surfaced later as a confusing ``AttributeError``).
    """

    model_type = "vibevoice"
    is_composition = True
    sub_configs = {
        "acoustic_tokenizer_config": VibeVoiceAcousticTokenizerConfig,
        "semantic_tokenizer_config": VibeVoiceSemanticTokenizerConfig,
        "decoder_config": Qwen2Config,
        "diffusion_head_config": VibeVoiceDiffusionHeadConfig,
    }
    # keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Qwen2`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }

    @classmethod
    def _resolve_sub_config(cls, key, value):
        """Normalize a sub-config argument (None | dict | config instance).

        Mirrors the original per-field logic: dicts get their ``model_type``
        overwritten (the input dict is mutated, as before) and are expanded
        into the registered config class.
        """
        config_cls = cls.sub_configs[key]
        if value is None:
            return config_cls()
        if isinstance(value, dict):
            value["model_type"] = config_cls.model_type
            return config_cls(**value)
        if isinstance(value, config_cls):
            return value
        raise TypeError(f"Invalid type for {key}: {type(value).__name__}")

    def __init__(
        self,
        acoustic_tokenizer_config=None,
        semantic_tokenizer_config=None,
        decoder_config=None,
        diffusion_head_config=None,
        **kwargs
    ):
        # kwargs["_attn_implementation"] = "flash_attention_2"
        kwargs["_attn_implementation_autoset"] = False

        self.acoustic_tokenizer_config = self._resolve_sub_config(
            "acoustic_tokenizer_config", acoustic_tokenizer_config
        )
        self.semantic_tokenizer_config = self._resolve_sub_config(
            "semantic_tokenizer_config", semantic_tokenizer_config
        )

        # Decoder handled inline: only Qwen2 decoders are supported, so a dict
        # must declare model_type "qwen2".
        if decoder_config is None:
            self.decoder_config = self.sub_configs["decoder_config"]()
        elif isinstance(decoder_config, dict):
            if decoder_config.get("model_type", '') == "qwen2":
                self.decoder_config = Qwen2Config(**decoder_config)
            else:
                raise ValueError(f"Unsupported decoder model type: {decoder_config.get('model_type', '')}")
        elif isinstance(decoder_config, (Qwen2Config,)):
            self.decoder_config = decoder_config
        else:
            raise TypeError(f"Invalid type for decoder_config: {type(decoder_config).__name__}")

        self.diffusion_head_config = self._resolve_sub_config(
            "diffusion_head_config", diffusion_head_config
        )

        # Convenience mirrors of the tokenizer latent dimensions.
        self.acoustic_vae_dim = getattr(self.acoustic_tokenizer_config, 'vae_dim', 64)
        self.semantic_vae_dim = getattr(self.semantic_tokenizer_config, 'vae_dim', 128)

        super().__init__(**kwargs)
|
| 242 |
+
|
| 243 |
+
# Public API of this configuration module.
__all__ = [
    "VibeVoiceAcousticTokenizerConfig",
    "VibeVoiceSemanticTokenizerConfig",
    "VibeVoiceDiffusionHeadConfig",
    "VibeVoiceConfig",
]
|