amir0907 commited on
Commit
66a1f1c
·
verified ·
1 Parent(s): 00d5a20

Upload data via Kaggle

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. VibeVoice-finetuning/LICENSE +21 -0
  3. VibeVoice-finetuning/README.md +762 -0
  4. VibeVoice-finetuning/diff_head_layers.txt +26 -0
  5. VibeVoice-finetuning/pyproject.toml +35 -0
  6. VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc +0 -0
  7. VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora0.cpython-312.pyc +0 -0
  8. VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora105.cpython-312.pyc +0 -0
  9. VibeVoice-finetuning/src/data_vibevoice.py +453 -0
  10. VibeVoice-finetuning/src/finetune_vibevoice_lora.py +902 -0
  11. VibeVoice-finetuning/src/finetune_vibevoice_lora0.py +984 -0
  12. VibeVoice-finetuning/src/finetune_vibevoice_lora00.py +1005 -0
  13. VibeVoice-finetuning/src/finetune_vibevoice_lora10.py +1044 -0
  14. VibeVoice-finetuning/src/finetune_vibevoice_lora105.py +1044 -0
  15. VibeVoice-finetuning/src/finetune_vibevoice_lora120.py +1072 -0
  16. VibeVoice-finetuning/src/vibevoice/.DS_Store +0 -0
  17. VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_1.5b_64k.json +112 -0
  18. VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_7b_32k.json +113 -0
  19. VibeVoice-finetuning/src/vibevoice/modular/__init__.py +0 -0
  20. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc +0 -0
  21. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc +0 -0
  22. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc +0 -0
  23. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc +0 -0
  24. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc +0 -0
  25. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc +0 -0
  26. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc +0 -0
  27. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc +0 -0
  28. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc +0 -0
  29. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc +0 -0
  30. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc +0 -0
  31. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc +0 -0
  32. VibeVoice-finetuning/src/vibevoice/modular/configuration_vibevoice.py +248 -0
  33. VibeVoice-finetuning/src/vibevoice/modular/modeling_vibevoice.py +508 -0
  34. VibeVoice-finetuning/src/vibevoice/modular/modeling_vibevoice_inference.py +715 -0
  35. VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_diffusion_head.py +287 -0
  36. VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_text_tokenizer.py +214 -0
  37. VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_tokenizer.py +1195 -0
  38. VibeVoice-finetuning/src/vibevoice/modular/streamer.py +264 -0
  39. VibeVoice-finetuning/src/vibevoice/processor/__init__.py +0 -0
  40. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-311.pyc +0 -0
  41. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc +0 -0
  42. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-311.pyc +0 -0
  43. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc +0 -0
  44. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-311.pyc +0 -0
  45. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc +0 -0
  46. VibeVoice-finetuning/src/vibevoice/processor/preprocessor_config.json +13 -0
  47. VibeVoice-finetuning/src/vibevoice/processor/vibevoice_processor.py +677 -0
  48. VibeVoice-finetuning/src/vibevoice/processor/vibevoice_tokenizer_processor.py +483 -0
  49. VibeVoice-finetuning/src/vibevoice/schedule/__init__.py +0 -0
  50. VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -58,3 +58,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ VibeVoice-finetuning/wandb/run-20260218_142500-puguclmi/run-puguclmi.wandb filter=lfs diff=lfs merge=lfs -text
62
+ VibeVoice-finetuning/wandb/run-20260218_143617-09tsct60/run-09tsct60.wandb filter=lfs diff=lfs merge=lfs -text
63
+ VibeVoice-finetuning/wandb/run-20260218_144236-a0h99ykt/run-a0h99ykt.wandb filter=lfs diff=lfs merge=lfs -text
64
+ VibeVoice-finetuning/wandb/run-20260224_190843-7tbryq2x/run-7tbryq2x.wandb filter=lfs diff=lfs merge=lfs -text
VibeVoice-finetuning/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Resemble AI
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
VibeVoice-finetuning/README.md ADDED
@@ -0,0 +1,762 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
+ # Unofficial WIP Finetuning repo for VibeVoice
5
+
6
+
7
+
8
+ # Hardware requirements
9
+
10
+
11
+
12
+ To train a VibeVoice 1.5B LoRA, a machine with at least 16 GB of VRAM is recommended.
13
+
14
+ To train a VibeVoice 7B LoRA, a machine with at least 48 GB of VRAM is recommended.
15
+
16
+ Keep in mind that longer audio clips increase VRAM requirements.
17
+
18
+
19
+
20
+ # Installation
21
+
22
+ It is recommended to install this in a fresh environment. Specifically, the Dockerized environment `runpod/pytorch:2.8.0-py3.11-cuda12.8.1-cudnn-devel-ubuntu22.04` has been tested to work.
23
+
24
+
25
+
26
+ Transformers version 4.51.3 is known to work, while other versions have errors related to Qwen2 architecture.
27
+
28
+
29
+ ```
30
+ git clone https://github.com/voicepowered-ai/VibeVoice-finetuning
31
+
32
+ pip install -e .
33
+
34
+ pip uninstall -y transformers && pip install transformers==4.51.3
35
+
36
+ (OPTIONAL) wandb login
37
+
38
+ (OPTIONAL) export HF_HOME=/workspace/hf_models
39
+ ```
40
+
41
+
42
+
43
+ # Usage
44
+
45
+
46
+
47
+ ## VibeVoice 1.5B / 7B (LoRA) fine-tuning
48
+
49
+
50
+
51
+
52
+
53
+ We put some code together for training VibeVoice (7B) with LoRA. This uses the vendored VibeVoice model/processor and trains with a dual loss: masked CE on text tokens plus diffusion MSE on acoustic latents.
54
+
55
+
56
+
57
+
58
+
59
+ Requirements:
60
+
61
+
62
+
63
+ - Download a compatible VibeVoice 7B or 1.5B checkpoint (config + weights) and its processor files (preprocessor_config.json), or run directly from a Hugging Face model.
64
+
65
+ - A 24 kHz audio dataset with audio files (target audio), text prompts (transcriptions) and optionally voice prompts (reference audio)
66
+
67
+
68
+
69
+
70
+
71
+
72
+ ### Training with Hugging Face Dataset
73
+
74
+
75
+ ```
76
+ python -m src.finetune_vibevoice_lora \
77
+
78
+ --model_name_or_path aoi-ot/VibeVoice-Large \
79
+
80
+ --processor_name_or_path src/vibevoice/processor \
81
+
82
+ --dataset_name your/dataset \
83
+
84
+ --text_column_name text \
85
+
86
+ --audio_column_name audio \
87
+
88
+ --voice_prompts_column_name voice_prompts \
89
+
90
+ --output_dir outputTrain3 \
91
+
92
+ --per_device_train_batch_size 8 \
93
+
94
+ --gradient_accumulation_steps 16 \
95
+
96
+ --learning_rate 2.5e-5 \
97
+
98
+ --num_train_epochs 5 \
99
+
100
+ --logging_steps 10 \
101
+
102
+ --save_steps 100 \
103
+
104
+ --eval_steps 100 \
105
+
106
+ --report_to wandb \
107
+
108
+ --remove_unused_columns False \
109
+
110
+ --bf16 True \
111
+
112
+ --do_train \
113
+
114
+ --gradient_clipping \
115
+
116
+ --gradient_checkpointing False \
117
+
118
+ --ddpm_batch_mul 4 \
119
+
120
+ --diffusion_loss_weight 1.4 \
121
+
122
+ --train_diffusion_head True \
123
+
124
+ --ce_loss_weight 0.04 \
125
+
126
+ --voice_prompt_drop_rate 0.2 \
127
+
128
+ --lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj \
129
+
130
+ --lr_scheduler_type cosine \
131
+
132
+ --warmup_ratio 0.03 \
133
+
134
+ --max_grad_norm 0.8
135
+ ```
136
+
137
+
138
+ ----------
139
+
140
+
141
+
142
+ ### Training with Local JSONL Dataset
143
+
144
+
145
+ ```
146
+ python -m src.finetune_vibevoice_lora \
147
+
148
+ --model_name_or_path aoi-ot/VibeVoice-Large \
149
+
150
+ --processor_name_or_path src/vibevoice/processor \
151
+
152
+ --train_jsonl prompts.jsonl \
153
+
154
+ --text_column_name text \
155
+
156
+ --audio_column_name audio \
157
+
158
+ --output_dir outputTrain3 \
159
+
160
+ --per_device_train_batch_size 8 \
161
+
162
+ --gradient_accumulation_steps 16 \
163
+
164
+ --learning_rate 2.5e-5 \
165
+
166
+ --num_train_epochs 5 \
167
+
168
+ --logging_steps 10 \
169
+
170
+ --save_steps 100 \
171
+
172
+ --report_to wandb \
173
+
174
+ --remove_unused_columns False \
175
+
176
+ --bf16 True \
177
+
178
+ --do_train \
179
+
180
+ --gradient_clipping \
181
+
182
+ --gradient_checkpointing False \
183
+
184
+ --ddpm_batch_mul 4 \
185
+
186
+ --diffusion_loss_weight 1.4 \
187
+
188
+ --train_diffusion_head True \
189
+
190
+ --ce_loss_weight 0.04 \
191
+
192
+ --voice_prompt_drop_rate 0.2 \
193
+
194
+ --lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj \
195
+
196
+ --lr_scheduler_type cosine \
197
+
198
+ --warmup_ratio 0.03 \
199
+
200
+ --max_grad_norm 0.8
201
+ ```
202
+
203
+
204
+ ### JSONL format:
205
+
206
+
207
+
208
+ You can provide an optional `voice_prompts` key. If it is omitted, a voice prompt will be automatically generated from the target audio.
209
+
210
+
211
+
212
+ **Example without a pre-defined voice prompt (will be auto-generated):**
213
+
214
+ `{"text": "Speaker 0: Speaker0 transcription.", "audio": "/workspace/wavs/segment_000000.wav"}`
215
+
216
+
217
+
218
+ **Example with a pre-defined voice prompt:**
219
+
220
+ `{"text": "Speaker 0: Speaker0 transcription.", "audio": "/workspace/wavs/segment_000000.wav", "voice_prompts": "/path/to/a/different/prompt.wav"}`
221
+
222
+
223
+
224
+ **Example with multiple speakers and voice prompts:**
225
+
226
+ `{"text": "Speaker 0: How is the project coming along?\nSpeaker 1: It's going well, we should be finished by Friday.", "audio": "/data/conversations/convo_01.wav", "voice_prompts": ["/data/prompts/alice_voice_prompt.wav", "/data/prompts/bob_voice_prompt.wav"]}`
227
+
228
+
229
+
230
+
231
+
232
+ # Notes:
233
+
234
+
235
+
236
+ - Audio is assumed to be 24 kHz; input audio will be loaded/resampled to 24 kHz.
237
+
238
+
239
+
240
+ - If you pass raw NumPy arrays or torch Tensors as audio (without sampling rate metadata), the collator assumes they are already 24 kHz. To trigger resampling, provide dicts like {"array": <np.ndarray>, "sampling_rate": <int>} or file paths.
241
+
242
+
243
+
244
+ - Tokenizers (acoustic/semantic) are frozen by default. LoRA is applied to the LLM (Qwen) and optionally to the diffusion head.
245
+
246
+
247
+
248
+ - The collator builds interleaved sequences with speech placeholders and computes the required masks for diffusion loss.
249
+
250
+ - If a voice_prompts column is not provided in your dataset for a given sample, a voice prompt is **automatically generated** by taking a random clip from the target audio. This fallback ensures the model's voice cloning ability is maintained. You can override this behavior by providing your own voice prompts.
251
+
252
+ - Said voice_prompts are randomly dropped during training to improve generalization. Drop rates of 0.2 and 0.25 have been tested with satisfactory results.
253
+
254
+
255
+
256
+ - The model learns to emit a closing `[speech_end]` token after target placeholders.
257
+
258
+
259
+
260
+ - For multi‑speaker prompts, ensure `voice_prompts` list order matches `Speaker 0/1/...` tags in your text.
261
+
262
+
263
+
264
+ - LoRA adapters are saved under `output_dir/lora` after training.
265
+
266
+
267
+
268
+
269
+
270
+ # Acknowledgements
271
+
272
+
273
+
274
+ - [VibeVoice](https://github.com/microsoft/VibeVoice)
275
+
276
+
277
+
278
+ - [chatterbox-finetuning](https://github.com/stlohrey/chatterbox-finetuning)
279
+
280
+
281
+
282
+
283
+ ## Training Script Arguments
284
+
285
+
286
+
287
+ Comprehensive list of all the command-line arguments available for the fine-tuning script.
288
+
289
+
290
+
291
+ ### Model & Architecture Arguments
292
+
293
+ Controls the base model, its configuration, and which components are trained.
294
+
295
+
296
+
297
+ * `--model_name_or_path`
298
+
299
+ * **What it does:** Specifies the path to the pretrained VibeVoice base model. This can be a local directory or a Hugging Face Hub repository ID.
300
+
301
+ * **Required:** Yes.
302
+
303
+ * **Example:**
304
+
305
+ ```bash
306
+
307
+ --model_name_or_path aoi-ot/VibeVoice-Large
308
+
309
+ ```
310
+
311
+
312
+
313
+ * `--processor_name_or_path`
314
+
315
+ * **What it does:** Specifies the path to the VibeVoice processor configuration. If not provided, it defaults to the `model_name_or_path`.
316
+
317
+ * **Example:**
318
+
319
+ ```bash
320
+
321
+ --processor_name_or_path src/vibevoice/processor
322
+
323
+ ```
324
+
325
+
326
+
327
+ * `--train_diffusion_head`
328
+
329
+ * **What it does:** A boolean flag to enable **full fine-tuning** of the diffusion prediction head. When enabled, all parameters of the diffusion head become trainable.
330
+
331
+ * **Example:**
332
+
333
+ ```bash
334
+
335
+ --train_diffusion_head True
336
+
337
+ ```
338
+
339
+
340
+
341
+ * `--train_connectors`
342
+
343
+ * **What it does:** A boolean flag to enable training of the acoustic and semantic connectors, which bridge different parts of the model.
344
+
345
+ * **Example:**
346
+
347
+ ```bash
348
+
349
+ --train_connectors True
350
+
351
+ ```
352
+
353
+
354
+
355
+ * `--lora_target_modules`
356
+
357
+ * **What it does:** A comma-separated string of module names within the language model to apply LoRA adapters to. This is the primary way to enable LoRA for the text-processing part of the model.
358
+
359
+ * **Example:**
360
+
361
+ ```bash
362
+
363
+ --lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
364
+
365
+ ```
366
+
367
+
368
+
369
+ * `--lora_r`
370
+
371
+ * **What it does:** The rank (`r`) of the LoRA decomposition. A smaller number means fewer trainable parameters.
372
+
373
+ * **Default:** `8`
374
+
375
+ * **Example:**
376
+
377
+ ```bash
378
+
379
+ --lora_r 16
380
+
381
+ ```
382
+
383
+
384
+
385
+ * `--lora_alpha`
386
+
387
+ * **What it does:** The scaling factor for the LoRA weights. A common practice is to set `lora_alpha` to be four times the value of `lora_r`.
388
+
389
+ * **Default:** `32`
390
+
391
+ * **Example:**
392
+
393
+ ```bash
394
+
395
+ --lora_alpha 64
396
+
397
+ ```
398
+
399
+ * `--lora_wrap_diffusion_head`
400
+
401
+ * **What it does:** An **alternative** to `--train_diffusion_head`. If `True`, it applies LoRA adapters to the diffusion head instead of fine-tuning it fully, enabling more parameter-efficient training of the head. Use only one of `--train_diffusion_head` or `--lora_wrap_diffusion_head`, not both.
402
+
403
+ * **Default:** `False`
404
+
405
+
406
+
407
+
408
+
409
+ * `--layers_to_freeze`
410
+
411
+ * **What it does:** Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8'). [Diffusion head layer indices](https://github.com/voicepowered-ai/VibeVoice-finetuning/blob/main/diff_head_layers.txt)
412
+
413
+ * **Default:** `None`
414
+
415
+ ### Data & Processing Arguments
416
+
417
+ Defines the dataset to be used, its structure, and how it should be processed.
418
+
419
+
420
+
421
+ * `--train_jsonl`
422
+
423
+ * **What it does:** Path to your local training data file in JSONL (JSON Lines) format. Each line should be a JSON object with keys for text and audio path.
424
+
425
+ * **Example:**
426
+
427
+ ```bash
428
+
429
+ --train_jsonl prompts.jsonl
430
+
431
+ ```
432
+
433
+
434
+
435
+ * `--validation_jsonl`
436
+
437
+ * **What it does:** Optional path to a local validation data file in JSONL format.
438
+
439
+ * **Example:**
440
+
441
+ ```bash
442
+
443
+ --validation_jsonl validation_prompts.jsonl
444
+
445
+ ```
446
+
447
+
448
+
449
+ * `--text_column_name`
450
+
451
+ * **What it does:** The name of the key in your JSONL file that contains the text transcription/prompt.
452
+
453
+ * **Default:** `text`
454
+
455
+ * **Example:**
456
+
457
+ ```bash
458
+
459
+ --text_column_name "prompt"
460
+
461
+ ```
462
+
463
+
464
+
465
+ * `--audio_column_name`
466
+
467
+ * **What it does:** The name of the key in your JSONL file that contains the path to the audio file.
468
+
469
+ * **Default:** `audio`
470
+
471
+ * **Example:**
472
+
473
+ ```bash
474
+
475
+ --audio_column_name "file_path"
476
+
477
+ ```
478
+
479
+
480
+
481
+ * `--voice_prompt_drop_rate`
482
+
483
+ * **What it does:** The probability (from 0.0 to 1.0) of randomly dropping the conditioning voice prompt during training. This acts as a regularizer.
484
+
485
+ * **Default:** `0.0`
486
+
487
+ * **Example:**
488
+
489
+ ```bash
490
+
491
+ --voice_prompt_drop_rate 0.2
492
+
493
+ ```
494
+
495
+
496
+
497
+ ### Core Training Arguments
498
+
499
+ Standard Hugging Face `TrainingArguments` that control the training loop, optimizer, and saving.
500
+
501
+
502
+
503
+ * `--output_dir`
504
+
505
+ * **What it does:** The directory where model checkpoints and final outputs will be saved.
506
+
507
+ * **Required:** Yes.
508
+
509
+ * **Example:**
510
+
511
+ ```bash
512
+
513
+ --output_dir output_model
514
+
515
+ ```
516
+
517
+
518
+
519
+ * `--per_device_train_batch_size`
520
+
521
+ * **What it does:** The number of training examples processed per GPU in a single step.
522
+
523
+ * **Example:**
524
+
525
+ ```bash
526
+
527
+ --per_device_train_batch_size 8
528
+
529
+ ```
530
+
531
+
532
+
533
+ * `--gradient_accumulation_steps`
534
+
535
+ * **What it does:** The number of forward passes to accumulate gradients for before performing an optimizer step. This effectively increases the batch size without using more VRAM.
536
+
537
+ * **Example:**
538
+
539
+ ```bash
540
+
541
+ --gradient_accumulation_steps 16
542
+
543
+ ```
544
+
545
+
546
+
547
+ * `--learning_rate`
548
+
549
+ * **What it does:** The initial learning rate for the optimizer.
550
+
551
+ * **Example:**
552
+
553
+ ```bash
554
+
555
+ --learning_rate 2.5e-5
556
+
557
+ ```
558
+
559
+
560
+
561
+ * `--num_train_epochs`
562
+
563
+ * **What it does:** The total number of times to iterate over the entire training dataset.
564
+
565
+ * **Example:**
566
+
567
+ ```bash
568
+
569
+ --num_train_epochs 5
570
+
571
+ ```
572
+
573
+
574
+
575
+ * `--logging_steps`
576
+
577
+ * **What it does:** How often (in steps) to log training metrics like loss.
578
+
579
+ * **Example:**
580
+
581
+ ```bash
582
+
583
+ --logging_steps 10
584
+
585
+ ```
586
+
587
+
588
+
589
+ * `--save_steps`
590
+
591
+ * **What it does:** How often (in steps) to save a model checkpoint.
592
+
593
+ * **Example:**
594
+
595
+ ```bash
596
+
597
+ --save_steps 100
598
+
599
+ ```
600
+
601
+
602
+
603
+ * `--report_to`
604
+
605
+ * **What it does:** The integration to report logs to. Can be `wandb`, `tensorboard`, or `none`.
606
+
607
+ * **Example:**
608
+
609
+ ```bash
610
+
611
+ --report_to wandb
612
+
613
+ ```
614
+
615
+
616
+
617
+ * `--remove_unused_columns`
618
+
619
+ * **What it does:** Whether to remove columns from the dataset not used by the model's `forward` method. **This must be set to `False`** for this script to work correctly.
620
+
621
+ * **Example:**
622
+
623
+ ```bash
624
+
625
+ --remove_unused_columns False
626
+
627
+ ```
628
+
629
+
630
+
631
+ * `--bf16`
632
+
633
+ * **What it does:** Enables mixed-precision training using `bfloat16`. This speeds up training and reduces memory usage on compatible GPUs (NVIDIA Ampere series and newer).
634
+
635
+ * **Example:**
636
+
637
+ ```bash
638
+
639
+ --bf16 True
640
+
641
+ ```
642
+
643
+
644
+
645
+ * `--gradient_checkpointing`
646
+
647
+ * **What it does:** A memory-saving technique that trades compute for memory. Useful for training large models on limited VRAM.
648
+
649
+ * **Example:**
650
+
651
+ ```bash
652
+
653
+ --gradient_checkpointing True
654
+
655
+ ```
656
+
657
+
658
+
659
+ * `--lr_scheduler_type`
660
+
661
+ * **What it does:** The type of learning rate schedule to use (e.g., `linear`, `cosine`, `constant`).
662
+
663
+ * **Example:**
664
+
665
+ ```bash
666
+
667
+ --lr_scheduler_type cosine
668
+
669
+ ```
670
+
671
+
672
+
673
+ * `--warmup_ratio`
674
+
675
+ * **What it does:** The proportion of total training steps used for a linear warmup from 0 to the initial learning rate.
676
+
677
+ * **Example:**
678
+
679
+ ```bash
680
+
681
+ --warmup_ratio 0.03
682
+
683
+ ```
684
+
685
+
686
+
687
+ ### Custom VibeVoice Training Arguments
688
+
689
+ Special arguments to control VibeVoice-specific training behaviors.
690
+
691
+
692
+
693
+ * `--gradient_clipping`
694
+
695
+ * **What it does:** A custom boolean flag that acts as the master switch for gradient clipping. If you include this flag, the value from `--max_grad_norm` will be used to prevent exploding gradients.
696
+
697
+ * **Example:**
698
+
699
+ ```bash
700
+
701
+ --gradient_clipping
702
+
703
+ ```
704
+
705
+ * `--max_grad_norm`
706
+
707
+ * **What it does:** The maximum value for gradient clipping. Only active if `--gradient_clipping` is also used.
708
+
709
+ * **Default:** `1.0`
710
+
711
+ * **Example:**
712
+
713
+ ```bash
714
+
715
+ --max_grad_norm 0.8
716
+
717
+ ```
718
+
719
+
720
+
721
+ * `--diffusion_loss_weight`
722
+
723
+ * **What it does:** A float that scales the importance of the diffusion loss (for speech generation quality) in the total loss calculation.
724
+
725
+ * **Example:**
726
+
727
+ ```bash
728
+
729
+ --diffusion_loss_weight 1.4
730
+
731
+ ```
732
+
733
+
734
+
735
+ * `--ce_loss_weight`
736
+
737
+ * **What it does:** A float that scales the importance of the Cross-Entropy loss (for text prediction accuracy) in the total loss calculation.
738
+
739
+ * **Example:**
740
+
741
+ ```bash
742
+
743
+ --ce_loss_weight 0.04
744
+
745
+ ```
746
+
747
+
748
+
749
+ * `--ddpm_batch_mul`
750
+
751
+ * **What it does:** An integer multiplier for the batch size used specifically within the diffusion process.
752
+
753
+ * **Example:**
754
+
755
+ ```bash
756
+
757
+ --ddpm_batch_mul 4
758
+
759
+
760
+ ```
761
+
762
+
VibeVoice-finetuning/diff_head_layers.txt ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [0] noisy_images_proj.weight (shape: (3584, 64), trainable: True)
2
+ [1] cond_proj.weight (shape: (3584, 3584), trainable: True)
3
+ [2] t_embedder.mlp.0.weight (shape: (3584, 256), trainable: True)
4
+ [3] t_embedder.mlp.2.weight (shape: (3584, 3584), trainable: True)
5
+ [4] layers.0.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
6
+ [5] layers.0.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
7
+ [6] layers.0.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
8
+ [7] layers.0.norm.weight (shape: (3584,), trainable: True)
9
+ [8] layers.0.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
10
+ [9] layers.1.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
11
+ [10] layers.1.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
12
+ [11] layers.1.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
13
+ [12] layers.1.norm.weight (shape: (3584,), trainable: True)
14
+ [13] layers.1.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
15
+ [14] layers.2.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
16
+ [15] layers.2.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
17
+ [16] layers.2.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
18
+ [17] layers.2.norm.weight (shape: (3584,), trainable: True)
19
+ [18] layers.2.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
20
+ [19] layers.3.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
21
+ [20] layers.3.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
22
+ [21] layers.3.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
23
+ [22] layers.3.norm.weight (shape: (3584,), trainable: True)
24
+ [23] layers.3.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
25
+ [24] final_layer.linear.weight (shape: (64, 3584), trainable: True)
26
+ [25] final_layer.adaLN_modulation.1.weight (shape: (7168, 3584), trainable: True)
VibeVoice-finetuning/pyproject.toml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "vibevoice-finetuning"
3
+ version = "0.1.0"
4
+ description = "Open Source finetuning code for VibeVoice"
5
+ readme = "README.md"
6
+ requires-python = ">=3.8"
7
+ license = {file = "LICENSE"}
8
+ authors = [
9
+ {name = "jpgallegoarvpb", email = "juanpablo.gallego@voicepowered.ai"}
10
+ ]
11
+ dependencies = [
12
+ "numpy~=1.26.0",
13
+ "resampy==0.4.3",
14
+ "librosa==0.11.0",
15
+ "s3tokenizer",
16
+ "torch",
17
+ "torchaudio",
18
+ "transformers",
19
+ "datasets>=2.18.0",
20
+ "diffusers==0.29.0",
21
+ "resemble-perth==1.0.1",
22
+ "omegaconf==2.3.0",
23
+ "conformer==0.3.2",
24
+ "safetensors==0.5.3",
25
+ "peft>=0.11.0",
26
+ "tensorboard>=2.12",
27
+ "wandb"
28
+ ]
29
+
30
+ [build-system]
31
+ requires = ["setuptools>=61.0"]
32
+ build-backend = "setuptools.build_meta"
33
+
34
+ [tool.setuptools.packages.find]
35
+ where = ["src"]
VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc ADDED
Binary file (23.2 kB). View file
 
VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora0.cpython-312.pyc ADDED
Binary file (64.9 kB). View file
 
VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora105.cpython-312.pyc ADDED
Binary file (68.5 kB). View file
 
VibeVoice-finetuning/src/data_vibevoice.py ADDED
@@ -0,0 +1,453 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ import warnings
8
+ import random
9
+
10
+ try:
11
+ import librosa # type: ignore
12
+ except Exception: # pragma: no cover
13
+ librosa = None # Fallback: user must install librosa when using local audio paths
14
+
15
+ try:
16
+ import resampy # type: ignore
17
+ except Exception: # pragma: no cover
18
+ resampy = None
19
+
20
+
21
+ def _resample_if_needed(wav: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
22
+ if orig_sr == target_sr:
23
+ return wav.astype(np.float32, copy=False)
24
+ if resampy is not None:
25
+ return resampy.resample(wav.astype(np.float32), orig_sr, target_sr)
26
+ if librosa is not None:
27
+ return librosa.resample(y=wav.astype(np.float32), orig_sr=orig_sr, target_sr=target_sr)
28
+ warnings.warn(
29
+ "No resampler available; treating audio as target_sr without resampling. Install resampy or librosa.",
30
+ RuntimeWarning,
31
+ )
32
+ return wav.astype(np.float32, copy=False)
33
+
34
+
35
+ # Lightweight HF-style dataset wrapper (optional). Trainer can also pass raw HF datasets directly.
36
class VibeVoiceDataset:
    """Thin adapter mapping arbitrary dataset column names onto the
    ``text`` / ``audio`` / ``voice_prompts`` keys the collator expects.

    When a sample carries no (truthy) voice prompt, one is synthesized by
    cropping a random window (roughly 5-15 s, bounded by the clip length)
    out of the target audio itself, preserving voice-cloning supervision.
    """

    def __init__(
        self,
        dataset: Any,
        text_column: str = "text",
        audio_column: str = "audio",
        voice_prompts_column: Optional[str] = "voice_prompts",
    ) -> None:
        self.dataset = dataset
        self.text_column = text_column
        self.audio_column = audio_column
        self.voice_prompts_column = voice_prompts_column

    def __len__(self) -> int:
        return len(self.dataset)

    def _random_prompt_from_target(self, audio_ref: Any) -> Optional[List[Any]]:
        """Crop a random slice of the target audio to serve as a voice prompt.

        Returns a single-element list (the crop) or None when the clip is too
        short (<= 0.1 s usable window) to yield a meaningful prompt.
        """
        sr = 24000
        samples = _load_audio_to_24k(audio_ref, target_sr=sr)
        duration = len(samples) / sr

        lo = min(5.0, duration / 4.0)
        hi = min(15.0, duration / 2.0)
        if lo > hi:
            # Degenerate range (very short clip): collapse onto the upper bound.
            lo = hi
            hi = min(hi, duration)

        if not hi > 0.1:
            return None

        n_prompt = int(random.uniform(lo, hi) * sr)
        begin = random.randint(0, len(samples) - n_prompt)
        return [samples[begin : begin + n_prompt]]

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        row = self.dataset[idx]
        sample: Dict[str, Any] = {
            "text": row[self.text_column],
            "audio": row[self.audio_column],
        }

        col = self.voice_prompts_column
        provided = row[col] if (col and col in row) else None

        if provided:
            # A (truthy) prompt came with the dataset — normalize to a list.
            sample["voice_prompts"] = provided if isinstance(provided, list) else [provided]
            return sample

        # FALLBACK: auto-generate a prompt by cropping the target audio.
        try:
            sample["voice_prompts"] = self._random_prompt_from_target(row[self.audio_column])
        except Exception as e:
            warnings.warn(f"Could not create voice prompt for item {idx}: {e}")
            sample["voice_prompts"] = None
        return sample
99
+
100
+
101
+ def _apply_silence_with_crossfade(
102
+ wav: np.ndarray,
103
+ *,
104
+ sample_rate: int,
105
+ pre_silence_sec: float = 0.25,
106
+ pre_crossfade_sec: float = 0.25,
107
+ post_crossfade_sec: float = 0.25,
108
+ post_silence_sec: float = 0.75,
109
+ ) -> np.ndarray:
110
+ """Pad audio with leading/trailing silence and apply crossfades.
111
+
112
+ Structure: [pre_silence][pre_crossfade][audio_body][post_crossfade][post_silence]
113
+ Crossfades blend the audio with silence linearly to avoid hard edges.
114
+ """
115
+
116
+ wav = np.asarray(wav, dtype=np.float32).reshape(-1)
117
+
118
+ start_sil_samples = int(round(pre_silence_sec * sample_rate))
119
+ end_sil_samples = int(round(post_silence_sec * sample_rate))
120
+ pre_crossfade_samples = int(round(pre_crossfade_sec * sample_rate))
121
+ post_crossfade_samples = int(round(post_crossfade_sec * sample_rate))
122
+
123
+ total_len = wav.shape[0]
124
+ if total_len == 0:
125
+ pieces: List[np.ndarray] = []
126
+ if start_sil_samples > 0:
127
+ pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
128
+ if end_sil_samples > 0:
129
+ pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
130
+ return np.concatenate(pieces) if pieces else wav
131
+
132
+ start_len = min(pre_crossfade_samples, total_len)
133
+ remaining_after_start = max(total_len - start_len, 0)
134
+ end_len = min(post_crossfade_samples, remaining_after_start)
135
+ middle_end_idx = total_len - end_len
136
+
137
+ start_segment = wav[:start_len]
138
+ middle_segment = wav[start_len:middle_end_idx]
139
+ end_segment = wav[middle_end_idx:]
140
+
141
+ def _linear_fade(num_samples: int, start: float, end: float) -> np.ndarray:
142
+ if num_samples <= 0:
143
+ return np.zeros((0,), dtype=np.float32)
144
+ return np.linspace(start, end, num_samples, endpoint=True, dtype=np.float32)
145
+
146
+ start_crossfade = start_segment * _linear_fade(start_len, 0.0, 1.0)
147
+ end_crossfade = end_segment * _linear_fade(end_segment.shape[0], 1.0, 0.0)
148
+
149
+ pieces: List[np.ndarray] = []
150
+ if start_sil_samples > 0:
151
+ pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
152
+ if start_crossfade.size > 0:
153
+ pieces.append(start_crossfade.astype(np.float32, copy=False))
154
+ if middle_segment.size > 0:
155
+ pieces.append(middle_segment.astype(np.float32, copy=False))
156
+ if end_crossfade.size > 0:
157
+ pieces.append(end_crossfade.astype(np.float32, copy=False))
158
+ if end_sil_samples > 0:
159
+ pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
160
+
161
+ return np.concatenate(pieces)
162
+
163
+
164
+ def _load_audio_to_24k(
165
+ audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]],
166
+ *,
167
+ target_sr: int = 24000,
168
+ augment_with_silence: bool = False,
169
+ ) -> np.ndarray:
170
+ if isinstance(audio, np.ndarray):
171
+ wav_out = audio.astype(np.float32)
172
+ elif isinstance(audio, torch.Tensor):
173
+ wav_out = audio.detach().cpu().float().numpy()
174
+ elif isinstance(audio, str):
175
+ if librosa is None:
176
+ raise RuntimeError("librosa is required to load audio file paths. Please pip install librosa.")
177
+ wav, sr = librosa.load(audio, sr=None, mono=True)
178
+ wav_out = _resample_if_needed(wav, int(sr), target_sr)
179
+ elif isinstance(audio, dict) and "array" in audio and "sampling_rate" in audio:
180
+ arr = np.asarray(audio["array"], dtype=np.float32)
181
+ sr = int(audio["sampling_rate"])
182
+ wav_out = _resample_if_needed(arr, sr, target_sr)
183
+ else:
184
+ raise ValueError(f"Unsupported audio type: {type(audio)}")
185
+
186
+ wav_out = np.asarray(wav_out, dtype=np.float32)
187
+
188
+ if augment_with_silence:
189
+ wav_out = _apply_silence_with_crossfade(wav_out, sample_rate=target_sr)
190
+
191
+ return wav_out
192
+
193
+
194
@dataclass
class VibeVoiceCollator:
    """Collate examples into a VibeVoice training batch.

    Per example this:
      1. Runs the processor on the text (+ optional voice prompts) to obtain
         token ids and the acoustic-input mask for the prompt segments.
      2. Appends one ``speech_diffusion_id`` placeholder per target latent
         frame, then ``speech_end_id`` (and EOS when the tokenizer has one).
      3. Pads all sequences to a common length, stacks every waveform
         (prompts + target) and builds the latent-frame masks.

    Returned dict keys: input_ids, attention_mask, speech_tensors,
    speech_masks, speech_semantic_tensors, acoustic_input_mask,
    acoustic_loss_mask, speeches_loss_input.

    Bug fixed vs. the previous revision: the latent-length fallback called
    ``math.ceil`` although this module never imports ``math``, raising
    NameError whenever the acoustic tokenizer was unavailable; it now uses
    integer ceiling division.
    """

    processor: Any  # VibeVoiceProcessor (tokenizer + optional acoustic/semantic tokenizers)
    max_length: Optional[int] = None
    # Waveform samples per acoustic latent frame (24 kHz audio -> 7.5 Hz latents).
    speech_compress_ratio: int = 3200
    semantic_vae_dim: int = 128
    compute_semantics: bool = False
    debug_checks: bool = False

    text_field: str = "text"
    audio_field: str = "audio"
    voice_prompts_field: str = "voice_prompts"
    # Probability of dropping the conditioning voice prompt for an example
    # (classifier-free-guidance style augmentation); clamped to [0, 1].
    voice_prompt_drop_rate: float = 0.0

    def _target_latent_length(self, wav_target: np.ndarray) -> int:
        """Return the number of acoustic latent frames for ``wav_target``.

        Prefers the exact frame count reported by the acoustic tokenizer's
        ``encode()`` (normalizing its various return formats); falls back to
        ceil(len(wav) / speech_compress_ratio).
        """
        try:
            acoustic_tok = getattr(self.processor, "acoustic_tokenizer", None)
            if acoustic_tok is not None and hasattr(acoustic_tok, "encode"):
                enc_out = acoustic_tok.encode(wav_target)
                T = None
                try:
                    # Direct array-like with shape (T, D) or (T,)
                    if hasattr(enc_out, "shape") and len(getattr(enc_out, "shape", [])) >= 1:
                        T = int(enc_out.shape[0])
                    else:
                        # Nested lists/tuples or ModelOutput-like: drill down
                        # a couple of levels safely.
                        cand = enc_out
                        for _ in range(2):
                            if isinstance(cand, (list, tuple)) and len(cand) > 0:
                                cand = cand[0]
                        if hasattr(cand, "shape") and len(getattr(cand, "shape", [])) >= 1:
                            T = int(cand.shape[0])
                except Exception:
                    T = None
                if T is not None and T > 0:
                    return T
        except Exception:
            pass
        # Integer ceiling division (replaces the former math.ceil call, which
        # raised NameError because `math` is not imported in this module).
        ratio = int(self.speech_compress_ratio)
        return max(1, (len(wav_target) + ratio - 1) // ratio)

    def _semantic_features(self, waveforms: List[np.ndarray], max_latent_len: int) -> torch.Tensor:
        """Encode each waveform with the semantic tokenizer and pad/trim every
        result to [max_latent_len, semantic_vae_dim]; returns the stacked
        float32 tensor [num_segments, max_latent_len, semantic_vae_dim]."""
        sem_feats: List[np.ndarray] = []
        for w in waveforms:
            try:
                # Expect [T, D] where T ≈ ceil(len(w)/compress_ratio)
                sem = np.asarray(self.processor.semantic_tokenizer.encode(w), dtype=np.float32)
            except Exception:
                sem = np.zeros((0, self.semantic_vae_dim), dtype=np.float32)
            if sem.ndim != 2:
                raise RuntimeError(f"Semantic tokenizer returned unexpected shape {sem.shape}. Expect [T, D].")
            L, D = sem.shape
            # Pad or trim the feature dimension to semantic_vae_dim.
            if D < self.semantic_vae_dim:
                pad_d = np.zeros((L, self.semantic_vae_dim - D), dtype=np.float32)
                sem = np.concatenate([sem, pad_d], axis=1)
            elif D > self.semantic_vae_dim:
                sem = sem[:, : self.semantic_vae_dim]
            # Pad or trim the time dimension to max_latent_len.
            if L < max_latent_len:
                pad_t = np.zeros((max_latent_len - L, self.semantic_vae_dim), dtype=np.float32)
                sem = np.concatenate([sem, pad_t], axis=0)
            elif L > max_latent_len:
                sem = sem[:max_latent_len]
            sem_feats.append(sem.astype(np.float32))
        return torch.tensor(np.stack(sem_feats, axis=0), dtype=torch.float32)

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, Any]:
        sample_input_ids: List[List[int]] = []
        sample_attention_masks: List[List[int]] = []
        sample_acoustic_input_masks: List[List[bool]] = []
        sample_acoustic_loss_masks: List[List[bool]] = []

        all_speech_waveforms: List[np.ndarray] = []
        all_speech_latent_lengths: List[int] = []
        per_segment_is_target: List[bool] = []

        for ex in features:
            text: str = ex.get(self.text_field, "")
            voice_prompts: Optional[List[Union[str, np.ndarray, torch.Tensor]]] = ex.get(self.voice_prompts_field)
            target_audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]] = ex.get(self.audio_field)

            # Clamp drop rate for safety.
            _drop_rate = min(1.0, max(0.0, self.voice_prompt_drop_rate))

            proc = self.processor(
                text=[text],
                voice_samples=[voice_prompts] if voice_prompts is not None and random.random() >= _drop_rate else None,
                padding=False,
                truncation=False,
                max_length=self.max_length,
                return_tensors="pt",
            )

            ids = proc["input_ids"][0].tolist()
            attn = proc.get("attention_mask", torch.ones_like(proc["input_ids"]))[0].tolist()
            speech_input_mask = proc.get("speech_input_mask")
            if speech_input_mask is None:
                speech_input_mask = torch.zeros_like(proc["input_ids"], dtype=torch.bool)
            speech_input_mask_list = speech_input_mask[0].tolist()

            # Target waveform, padded with silence + crossfades at both ends.
            wav_target = _load_audio_to_24k(target_audio, target_sr=24000, augment_with_silence=True)
            target_latent_len = self._target_latent_length(wav_target)

            # One diffusion placeholder token per target latent frame.
            speech_diff_id = self.processor.tokenizer.speech_diffusion_id
            ids_extended = ids + [speech_diff_id] * target_latent_len
            attn_extended = attn + [1] * target_latent_len

            acoustic_input_mask = speech_input_mask_list + [True] * target_latent_len
            acoustic_loss_mask = ([False] * len(speech_input_mask_list)) + [True] * target_latent_len

            speech_end_id = self.processor.tokenizer.speech_end_id
            ids_extended.append(speech_end_id)
            attn_extended.append(1)
            acoustic_input_mask.append(False)
            acoustic_loss_mask.append(False)

            # Ensure text decoding sees an explicit end-of-sequence token after speech output.
            eos_token_id = getattr(self.processor.tokenizer, "eos_id", None)
            if eos_token_id is None:
                eos_token_id = getattr(self.processor.tokenizer, "eos_token_id", None)
            if eos_token_id is not None and eos_token_id >= 0:
                ids_extended.append(eos_token_id)
                attn_extended.append(1)
                acoustic_input_mask.append(False)
                acoustic_loss_mask.append(False)

            if self.max_length is not None and len(ids_extended) > self.max_length:
                # Truncate from the left, but never into acoustic positions —
                # those carry the diffusion targets.
                cut = len(ids_extended) - int(self.max_length)
                leading_non_acoustic = 0
                for v in acoustic_input_mask:
                    if v:
                        break
                    leading_non_acoustic += 1
                if cut > leading_non_acoustic:
                    raise ValueError(
                        f"--max_length={self.max_length} would truncate into acoustic tokens. "
                        f"Needed cut={cut}, but only {leading_non_acoustic} leading non-acoustic tokens available. "
                        "Increase max_length or shorten text/voice-prompt preamble."
                    )
                ids_extended = ids_extended[cut:]
                attn_extended = attn_extended[cut:]
                acoustic_input_mask = acoustic_input_mask[cut:]
                acoustic_loss_mask = acoustic_loss_mask[cut:]

            sample_input_ids.append(ids_extended)
            sample_attention_masks.append(attn_extended)
            sample_acoustic_input_masks.append(acoustic_input_mask)
            sample_acoustic_loss_masks.append(acoustic_loss_mask)

            # Collect voice-prompt waveforms emitted by the processor.
            voice_speeches: List[np.ndarray] = []
            voice_latent_lengths: List[int] = []
            if proc.get("speech_tensors") is not None:
                voice_np = proc["speech_tensors"].cpu().numpy()
                voice_masks = proc["speech_masks"].cpu().numpy().astype(bool)
                for seg_idx in range(voice_np.shape[0]):
                    voice_speeches.append(voice_np[seg_idx])
                    voice_latent_lengths.append(int(voice_masks[seg_idx].sum()))

            all_speech_waveforms.extend(voice_speeches)
            all_speech_latent_lengths.extend(voice_latent_lengths)
            per_segment_is_target.extend([False] * len(voice_speeches))

            # The target segment goes last and is the only one supervised.
            all_speech_waveforms.append(wav_target)
            all_speech_latent_lengths.append(target_latent_len)
            per_segment_is_target.append(True)

        # Right-pad token sequences and masks to the batch maximum.
        max_seq_len = max(len(x) for x in sample_input_ids)
        padded_input_ids = []
        padded_attention_masks = []
        padded_acoustic_input_masks = []
        padded_acoustic_loss_masks = []
        tok = self.processor.tokenizer
        pad_token_id = getattr(tok, "pad_token_id", None)
        if pad_token_id is None or pad_token_id < 0:
            pad_token_id = getattr(tok, "eos_token_id", None)
        if pad_token_id is None or pad_token_id < 0:
            raise ValueError(
                "Tokenizer has no pad_token_id or eos_token_id; please set one or pass a valid pad id."
            )
        for ids, attn, ain_mask, aloss_mask in zip(
            sample_input_ids, sample_attention_masks, sample_acoustic_input_masks, sample_acoustic_loss_masks
        ):
            pad_len = max_seq_len - len(ids)
            padded_input_ids.append(ids + [pad_token_id] * pad_len)
            padded_attention_masks.append(attn + [0] * pad_len)
            padded_acoustic_input_masks.append(ain_mask + [False] * pad_len)
            padded_acoustic_loss_masks.append(aloss_mask + [False] * pad_len)

        input_ids_tensor = torch.tensor(padded_input_ids, dtype=torch.long)
        attention_mask_tensor = torch.tensor(padded_attention_masks, dtype=torch.long)
        acoustic_input_mask_tensor = torch.tensor(padded_acoustic_input_masks, dtype=torch.bool)
        acoustic_loss_mask_tensor = torch.tensor(padded_acoustic_loss_masks, dtype=torch.bool)

        if all_speech_waveforms:
            # Zero-pad all waveforms to the longest segment.
            max_wave_len = max(w.shape[0] for w in all_speech_waveforms)
            padded_speeches = np.zeros((len(all_speech_waveforms), max_wave_len), dtype=np.float32)
            for i, w in enumerate(all_speech_waveforms):
                padded_speeches[i, : w.shape[0]] = w

            max_latent_len = max(all_speech_latent_lengths) if all_speech_latent_lengths else 1
            speech_masks_np = np.zeros((len(all_speech_waveforms), max_latent_len), dtype=np.bool_)
            for i, L_lat in enumerate(all_speech_latent_lengths):
                speech_masks_np[i, :L_lat] = True

            speech_tensors_tensor = torch.tensor(padded_speeches, dtype=torch.float32)
            speech_masks_tensor = torch.tensor(speech_masks_np, dtype=torch.bool)

            # Only target segments contribute to the diffusion loss.
            speeches_loss_input_np = np.zeros_like(speech_masks_np, dtype=np.bool_)
            for i, is_target in enumerate(per_segment_is_target):
                if is_target:
                    speeches_loss_input_np[i] = speech_masks_np[i]
            speeches_loss_input_tensor = torch.tensor(speeches_loss_input_np, dtype=torch.bool)

            # Semantic features
            if self.compute_semantics and hasattr(self.processor, "semantic_tokenizer") and self.processor.semantic_tokenizer is not None:
                speech_semantic_tensors = self._semantic_features(all_speech_waveforms, max_latent_len)
            else:
                # Semantic tokenizer unavailable — fall back to zero features.
                # This lets preprocessing run without loading the semantic tokenizer.
                if not self.compute_semantics:
                    # Preprocess mode: all-zero semantic features.
                    sem_feats = [np.zeros((max_latent_len, self.semantic_vae_dim), dtype=np.float32)
                                 for _ in all_speech_waveforms]
                    speech_semantic_tensors = torch.tensor(np.stack(sem_feats, axis=0), dtype=torch.float32)
                else:
                    # Training mode: semantics were requested but no tokenizer exists.
                    raise RuntimeError(
                        "Semantic features are required but could not be computed. "
                        "Ensure processor.semantic_tokenizer is available or set compute_semantics=False for preprocessing."
                    )
        else:
            speech_tensors_tensor = None
            speech_masks_tensor = None
            speeches_loss_input_tensor = None
            speech_semantic_tensors = None  # No segments in batch

        if self.debug_checks:
            assert (input_ids_tensor >= 0).all(), "input_ids contains negative indices"
            if speech_tensors_tensor is not None:
                assert speech_tensors_tensor.dim() == 2, "Expected speech_tensors 2D [segments, samples]"

        return {
            "input_ids": input_ids_tensor,
            "attention_mask": attention_mask_tensor,
            "speech_tensors": speech_tensors_tensor,
            "speech_masks": speech_masks_tensor,
            "speech_semantic_tensors": speech_semantic_tensors,
            "acoustic_input_mask": acoustic_input_mask_tensor,
            "acoustic_loss_mask": acoustic_loss_mask_tensor,
            "speeches_loss_input": speeches_loss_input_tensor,
        }
VibeVoice-finetuning/src/finetune_vibevoice_lora.py ADDED
@@ -0,0 +1,902 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import logging
3
+ import os
4
+ from dataclasses import dataclass, field
5
+ from typing import Any, Dict, List, Optional, Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from datasets import load_dataset, DatasetDict, VerificationMode
11
+
12
+ from transformers import (
13
+ HfArgumentParser,
14
+ Trainer,
15
+ set_seed,
16
+ TrainerCallback,
17
+ )
18
+ from transformers import TrainingArguments as HfTrainingArguments
19
+
20
+ from peft import LoraConfig, get_peft_model, TaskType
21
+
22
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
23
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
24
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
25
+
26
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
27
+
28
+ logger = logging.getLogger(__name__)
29
+
30
+ # ================== SAMPLE CALLBACK UTILS ==================
31
+
32
+ import copy
33
+ import torch
34
+ from transformers import TrainerCallback
35
+
36
class EmaCallback(TrainerCallback):
    """Maintains an exponential moving average (EMA) of one sub-module's
    weights (by default the diffusion prediction head) and swaps the EMA
    weights in around evaluation, saving, and at the end of training.

    NOTE(review): transformers' Trainer fires ``on_evaluate``/``on_save``
    AFTER the evaluation/save has already completed, and does not define
    ``on_evaluate_end``/``on_save_end`` events at all — so as written the
    EMA swap likely does not affect the evaluated/saved weights, and the
    swap-back methods below may never be invoked (leaving EMA weights live
    during subsequent training). Verify against the installed transformers
    version before relying on this callback.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cpu"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        # Shadow copies are kept on this device (CPU by default) to avoid
        # holding a second full copy of the head in GPU memory.
        self.device = torch.device(device)
        self.shadow = None
        self._orig = None  # store non-EMA weights when we swap

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Initialize the shadow dict with a detached clone of every tensor in
        # the tracked module's state_dict.
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        # shadow = decay * shadow + (1 - decay) * current, once per optimizer step.
        if self.shadow is None: return
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        # Stash the live (non-EMA) weights, then load the EMA shadow.
        # NOTE(review): calling this twice without _swap_back overwrites
        # self._orig with EMA weights, losing the true training weights.
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        # Restore the stashed non-EMA weights (no-op if nothing stashed).
        if self._orig is None: return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # use EMA during eval
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # NOTE(review): not a standard TrainerCallback event — see class docstring.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # temporarily swap to EMA, let Trainer save, then swap back
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # NOTE(review): not a standard TrainerCallback event — see class docstring.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)
96
+
97
+
98
@dataclass
class ModelArguments:
    """CLI arguments controlling model/processor loading and which parts of
    VibeVoice are trained (LoRA targets, diffusion head, connectors)."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    # HF cache directory for downloaded weights.
    cache_dir: Optional[str] = field(default=None)
    # Keep the acoustic/semantic tokenizers frozen during fine-tuning.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # Standard LoRA hyper-parameters (rank / scaling / dropout).
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
123
+
124
@dataclass
class DataArguments:
    """CLI arguments describing the training/eval data sources and columns."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names mapped onto the collator's text/audio/voice-prompt fields.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of the train split carved out for eval when no eval split exists.
    eval_split_size: float = field(default=0.0)
    # Skip HF datasets checksum/size verification when loading.
    ignore_verifications: bool = field(default=False)
    # Hard cap on token sequence length passed to the collator.
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
142
+
143
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice-specific loss weights,
    CE debugging knobs, and gradient-clipping/save toggles."""

    # Multiplier for the diffusion (DDPM) sampling batch per step.
    # NOTE(review): semantics inferred from the name; confirm in compute_loss.
    ddpm_batch_mul: int = field(default=1)
    # Relative weights of the text CE and diffusion terms in the total loss.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Verbose cross-entropy debugging (top-k dumps every N steps).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
160
+
161
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the PEFT LoRA config for the LLM decoder blocks from CLI args."""
    module_names = [name.strip() for name in args.lora_target_modules.split(",")]
    module_names = [name for name in module_names if name]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=module_names,
    )
171
+
172
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build a LoRA config targeting the diffusion head's projection layers."""
    head_targets = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_targets,
    )
181
+ )
182
+
183
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Build next-token CE targets: shift labels left by one and replace
    padded positions and acoustic-placeholder positions with ``pad_id``
    (the index ignored by cross-entropy)."""
    next_tokens = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        keep = attention_mask[:, 1:].contiguous().eq(1)
    else:
        keep = torch.ones_like(next_tokens, dtype=torch.bool)
    keep = keep & ~acoustic_input_mask[:, 1:].contiguous()
    masked = next_tokens.clone()
    masked[~keep] = pad_id
    return masked
191
+
192
+ def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
193
+ try:
194
+ acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
195
+ if acoustic is None or not hasattr(acoustic, "encode"):
196
+ logger_.warning("No acoustic_tokenizer.encode() found to patch.")
197
+ return
198
+ base_encode = acoustic.encode
199
+ def encode_wrapped(*args, **kwargs):
200
+ out = base_encode(*args, **kwargs)
201
+ try:
202
+ _ = out[0][0]
203
+ return out
204
+ except Exception:
205
+ pass
206
+ if isinstance(out, dict):
207
+ for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
208
+ if k in out:
209
+ return [[out[k]]]
210
+ if len(out) > 0:
211
+ return [[next(iter(out.values()))]]
212
+ for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
213
+ if hasattr(out, attr):
214
+ return [[getattr(out, attr)]]
215
+ try:
216
+ if isinstance(out, torch.Tensor):
217
+ return [[out]]
218
+ except Exception:
219
+ pass
220
+ return [[out]]
221
+ acoustic.encode = encode_wrapped
222
+ logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
223
+ except Exception as e:
224
+ logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
225
+
226
+ def main() -> None:
227
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
228
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
229
+
230
+ logging.basicConfig(
231
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
232
+ datefmt="%m/%d/%Y %H:%M:%S",
233
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
234
+ )
235
+ logger.info("Training/evaluation parameters %s", training_args)
236
+ set_seed(training_args.seed)
237
+
238
+ # Configure gradient clipping
239
+ if not getattr(training_args, "gradient_clipping", False):
240
+ if hasattr(training_args, "max_grad_norm"):
241
+ training_args.max_grad_norm = 0.0
242
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
243
+ else:
244
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
245
+ training_args.max_grad_norm = 1.0
246
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
247
+
248
+ # Load processor
249
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
250
+ if processor_path is None:
251
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
252
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
253
+
254
+ # Required special tokens
255
+ tok = processor.tokenizer
256
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
257
+ if not hasattr(tok, required) or getattr(tok, required) is None:
258
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
259
+
260
+ # Load model
261
+ if model_args.model_name_or_path is None:
262
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
263
+ dtype = torch.float32
264
+ if training_args.bf16:
265
+ dtype = torch.bfloat16
266
+ elif getattr(training_args, "fp16", False):
267
+ dtype = torch.float16
268
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
269
+ model_args.model_name_or_path,
270
+ torch_dtype=dtype,
271
+ )
272
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
273
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
274
+
275
+ # Diagnostics: LM head tie
276
+ try:
277
+ in_emb_mod = model.get_input_embeddings()
278
+ out_emb_mod = model.get_output_embeddings()
279
+ in_w = getattr(in_emb_mod, "weight", None)
280
+ out_w = getattr(out_emb_mod, "weight", None)
281
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
282
+ values_equal = False
283
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
284
+ try:
285
+ values_equal = bool(torch.allclose(in_w, out_w))
286
+ except Exception:
287
+ values_equal = False
288
+ try:
289
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
290
+ except Exception:
291
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
292
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
293
+ if out_w is not None:
294
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
295
+ except Exception as e:
296
+ logger.warning(f"LM head tie diagnostics failed: {e}")
297
+
298
+ # Hard-tie LM head
299
+ try:
300
+ emb_module = model.get_input_embeddings()
301
+ head_module = model.get_output_embeddings()
302
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
303
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
304
+ with torch.no_grad():
305
+ head_module.weight = emb_module.weight
306
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
307
+ except Exception as e:
308
+ logger.warning(f"Force-tie of LM head failed: {e}")
309
+
310
+ # Validate special IDs (info logs only)
311
+ try:
312
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
313
+ try:
314
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
315
+ except Exception:
316
+ vocab_size = 0
317
+ in_emb_mod = model.get_input_embeddings()
318
+ out_emb_mod = model.get_output_embeddings()
319
+ in_w = getattr(in_emb_mod, "weight", None)
320
+ out_w = getattr(out_emb_mod, "weight", None)
321
+ for name in special_names:
322
+ val = getattr(tok, name, None)
323
+ exists = (val is not None)
324
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
325
+ equal_row = None
326
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
327
+ try:
328
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
329
+ except Exception:
330
+ equal_row = False
331
+ decoded_str = None
332
+ if exists and isinstance(val, int):
333
+ try:
334
+ decoded_str = tok.decode([val])
335
+ except Exception:
336
+ try:
337
+ decoded_str = tok.convert_ids_to_tokens(val)
338
+ except Exception:
339
+ decoded_str = "<decode_failed>"
340
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
341
+ except Exception as e:
342
+ logger.warning(f"Special token ID/row validation failed: {e}")
343
+
344
+ # Quick tokenizer diagnostics (optional)
345
+ try:
346
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
347
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
348
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
349
+ # tiny CE smoke test
350
+ with torch.no_grad():
351
+ simple_text = "The cat sat on the mat."
352
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
353
+ simple_mask = torch.ones_like(simple_ids)
354
+ x = model.get_input_embeddings()(simple_ids)
355
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
356
+ logits = model.lm_head(outputs.last_hidden_state)
357
+ shift_logits = logits[:, :-1, :].contiguous()
358
+ shift_labels = simple_ids[:, 1:].contiguous()
359
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
360
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
361
+ except Exception as e:
362
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
363
+
364
+ # Disable cache during training
365
+ if hasattr(model.config, "use_cache") and training_args.do_train:
366
+ model.config.use_cache = False
367
+
368
+ # Freeze tokenizers
369
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
370
+ for p in model.model.acoustic_tokenizer.parameters():
371
+ p.requires_grad = False
372
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
373
+ for p in model.model.semantic_tokenizer.parameters():
374
+ p.requires_grad = False
375
+
376
+ # LoRA wrap LLM (optional)
377
+ lora_cfg = build_lora_config(model_args)
378
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
379
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
380
+ if not skip_lm_lora:
381
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
382
+ else:
383
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
384
+
385
+ try:
386
+ model.tie_weights()
387
+ except Exception:
388
+ pass
389
+
390
+ # Freeze all then enable trainable subsets
391
+ for _, p in model.named_parameters():
392
+ p.requires_grad = False
393
+
394
+ try:
395
+ for n, p in model.model.language_model.named_parameters():
396
+ if "lora_A" in n or "lora_B" in n:
397
+ p.requires_grad = True
398
+ except Exception:
399
+ logger.warning("Could not re-enable LoRA params on language_model.")
400
+
401
+ # Diffusion head LoRA wrapping (optional)
402
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
403
+ class _HeadForwardShim(nn.Module):
404
+ def __init__(self, base: nn.Module): super().__init__(); self.base = base
405
+ def forward(self, *args, **kwargs):
406
+ if len(args) >= 3:
407
+ noisy_images, timesteps, condition = args[:3]
408
+ else:
409
+ noisy_images = kwargs.get("noisy_images")
410
+ timesteps = kwargs.get("timesteps")
411
+ condition = kwargs.get("condition")
412
+ return self.base(noisy_images, timesteps, condition)
413
+ try:
414
+ shim = _HeadForwardShim(model.model.prediction_head)
415
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
416
+ for n, p in model.model.prediction_head.named_parameters():
417
+ if "lora_A" in n or "lora_B" in n:
418
+ p.requires_grad = True
419
+ except Exception as e:
420
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
421
+
422
+ # Train full diffusion head (optional)
423
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
424
+ for p in model.model.prediction_head.parameters():
425
+ p.requires_grad = True
426
+
427
+ # Freeze diffusion head layers (optional)
428
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
429
+ head_params = list(model.model.prediction_head.named_parameters())
430
+ try:
431
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
432
+ frozen_count = 0
433
+ for i, (name, param) in enumerate(head_params):
434
+ if i in indices_to_freeze:
435
+ param.requires_grad = False
436
+ frozen_count += 1
437
+ logger.info(f"Froze layer [{i}]: {name}")
438
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
439
+ except Exception as e:
440
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
441
+ raise
442
+
443
+ # Connectors
444
+ if getattr(model_args, "train_connectors", False):
445
+ if hasattr(model.model, "acoustic_connector"):
446
+ for p in model.model.acoustic_connector.parameters():
447
+ p.requires_grad = True
448
+ if hasattr(model.model, "semantic_connector"):
449
+ for p in model.model.semantic_connector.parameters():
450
+ p.requires_grad = True
451
+ else:
452
+ if hasattr(model.model, "acoustic_connector"):
453
+ for p in model.model.acoustic_connector.parameters():
454
+ p.requires_grad = False
455
+ if hasattr(model.model, "semantic_connector"):
456
+ for p in model.model.semantic_connector.parameters():
457
+ p.requires_grad = False
458
+
459
+ # Freeze embedding + head
460
+ try:
461
+ emb = model.get_input_embeddings()
462
+ if hasattr(emb, "weight"):
463
+ emb.weight.requires_grad_(False)
464
+ head = model.get_output_embeddings()
465
+ if head is not None and hasattr(head, "weight"):
466
+ head.weight.requires_grad_(False)
467
+ except Exception:
468
+ pass
469
+
470
+ # Diagnostics
471
+ def _sum_params(named_iter):
472
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
473
+ try:
474
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
475
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
476
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
477
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
478
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
479
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
480
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
481
+ except Exception:
482
+ pass
483
+
484
+ # Datasets
485
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
486
+ if data_args.train_jsonl is not None:
487
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
488
+ if data_args.validation_jsonl is not None:
489
+ data_files["validation"] = data_args.validation_jsonl
490
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
491
+ else:
492
+ if data_args.dataset_name is None:
493
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
494
+ raw = load_dataset(
495
+ data_args.dataset_name,
496
+ data_args.dataset_config_name,
497
+ verification_mode=verification_mode,
498
+ cache_dir=model_args.cache_dir,
499
+ )
500
+ train_ds = raw[data_args.train_split_name]
501
+ eval_ds = None
502
+ if training_args.do_eval:
503
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
504
+ eval_ds = raw[data_args.eval_split_name]
505
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
506
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
507
+ train_ds, eval_ds = split["train"], split["test"]
508
+
509
+ train_dataset = VibeVoiceDataset(
510
+ train_ds,
511
+ text_column=data_args.text_column_name,
512
+ audio_column=data_args.audio_column_name,
513
+ voice_prompts_column=data_args.voice_prompts_column_name,
514
+ )
515
+ eval_dataset = None
516
+ if eval_ds is not None:
517
+ eval_dataset = VibeVoiceDataset(
518
+ eval_ds,
519
+ text_column=data_args.text_column_name,
520
+ audio_column=data_args.audio_column_name,
521
+ voice_prompts_column=data_args.voice_prompts_column_name,
522
+ )
523
+
524
+ # Ratios/dims from processor+model
525
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
526
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
527
+ if semantic_dim is None:
528
+ try:
529
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
530
+ except Exception:
531
+ semantic_dim = 128
532
+
533
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
534
+
535
+ data_collator = VibeVoiceCollator(
536
+ processor=processor,
537
+ max_length=data_args.max_length,
538
+ speech_compress_ratio=speech_compress_ratio,
539
+ semantic_vae_dim=semantic_dim,
540
+ compute_semantics=compute_semantics_flag,
541
+ debug_checks=False,
542
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
543
+ )
544
+
545
class LoRADebugCallback(TrainerCallback):
    """Periodically verifies that LoRA A/B matrices are present, trainable,
    and actually changing between optimizer steps (by tracking their norms)."""

    def __init__(self, log_every_n_steps: int = 50):
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        # Last observed L2 norm per LoRA parameter, keyed by parameter name.
        self.prev_param_norms: Dict[str, float] = {}
        self.lora_param_names: List[str] = []

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        try:
            if model is None:
                return
            name_to_param: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            self.lora_param_names = [
                name for name in name_to_param if "lora_A" in name or "lora_B" in name
            ]
            # Snapshot initial norms so the first on_step_end can diff against them.
            self.prev_param_norms = {
                name: float(name_to_param[name].data.norm().item())
                for name in self.lora_param_names
            }
            total = len(self.lora_param_names)
            req_grad = sum(
                1 for name in self.lora_param_names if name_to_param[name].requires_grad
            )
            num_A = sum(1 for name in self.lora_param_names if "lora_A" in name)
            num_B = sum(1 for name in self.lora_param_names if "lora_B" in name)
            zero_B = sum(
                1
                for name in self.lora_param_names
                if "lora_B" in name
                and float(name_to_param[name].data.norm().item()) == 0.0
            )
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        try:
            if model is None or not self.lora_param_names:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Log on step 1 and every N steps thereafter.
            if step != 1 and step % self.log_every_n_steps != 0:
                return
            name_to_param: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            eps = 1e-12
            changed_A = changed_B = zero_B = 0
            for name in self.lora_param_names:
                param = name_to_param.get(name, None)
                if param is None:
                    continue
                previous = self.prev_param_norms.get(name, 0.0)
                current = float(param.data.norm().item())
                moved = abs(current - previous) > eps
                if "lora_A" in name and moved:
                    changed_A += 1
                if "lora_B" in name:
                    if moved:
                        changed_B += 1
                    if current == 0.0:
                        zero_B += 1
                self.prev_param_norms[name] = current
            total_A = sum(1 for name in self.lora_param_names if "lora_A" in name)
            total_B = sum(1 for name in self.lora_param_names if "lora_B" in name)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
604
+
605
class VibeVoiceTrainer(Trainer):
    """Trainer specialization for VibeVoice fine-tuning.

    Combines a cross-entropy loss over text tokens with the model's diffusion
    loss (weighted by ``training_args.ce_loss_weight`` /
    ``training_args.diffusion_loss_weight``), logs selection-mask invariants,
    and overrides ``_save`` so every checkpoint also contains the full
    diffusion head and connector weights for inference.

    NOTE(review): this class is defined inside ``main()`` and closes over
    ``training_args``, ``logger``, and ``mask_for_ce`` from the enclosing
    scope; it is not importable on its own.
    """

    def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
        """Return weighted CE + diffusion loss for one collated batch.

        ``inputs["input_ids"]`` double as the CE labels (shifted below);
        the various masks select which positions participate in the text
        vs. acoustic losses.  ``inputs`` is mutated in place to guarantee
        ``speech_semantic_tensors`` exists with the connector's dtype.
        """
        labels = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        acoustic_input_mask = inputs.get("acoustic_input_mask")

        # Ensure semantic tensors exist and have correct dtype/device
        sem = inputs.get("speech_semantic_tensors", None)
        try:
            # Match the semantic connector's parameter dtype (bf16/fp16/fp32).
            target_dtype = next(model.model.semantic_connector.parameters()).dtype
        except Exception:
            target_dtype = model.get_input_embeddings().weight.dtype

        if sem is None:
            sm = inputs.get("speech_masks")
            if sm is not None:
                # No semantics provided: substitute an all-zero tensor shaped
                # (batch, speech_len, semantic_vae_dim) so the forward pass
                # does not branch on a missing key.
                zeros = torch.zeros(
                    sm.size(0), sm.size(1),
                    getattr(model.config, "semantic_vae_dim", 128),
                    dtype=target_dtype,
                    device=sm.device,
                )
                inputs["speech_semantic_tensors"] = zeros
        else:
            if isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

        outputs = model(
            input_ids=inputs.get("input_ids"),
            attention_mask=attention_mask,
            speech_tensors=inputs.get("speech_tensors"),
            speech_masks=inputs.get("speech_masks"),
            speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
            acoustic_input_mask=acoustic_input_mask,
            acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
            speeches_loss_input=inputs.get("speeches_loss_input"),
            ddpm_batch_mul=training_args.ddpm_batch_mul,
        )

        # Invariants: token/latent selection equality across views (warn, don't assert)
        try:
            al_mask = inputs.get("acoustic_loss_mask")
            sp_masks = inputs.get("speech_masks")
            sp_loss_sel = inputs.get("speeches_loss_input")
            num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
            num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
            num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
            num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
            self.log({
                "debug/num_tok_total": float(num_tok_total),
                "debug/num_tok_loss": float(num_tok_loss),
                "debug/num_lat_total": float(num_lat_total),
                "debug/num_lat_loss": float(num_lat_loss),
            })
            if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                # Token-side and latent-side loss selections should pick the
                # same number of positions; a mismatch indicates a collator bug.
                if num_tok_loss != num_lat_loss:
                    logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
        except Exception:
            pass

        # CE Loss
        logits = outputs.logits
        # mask_for_ce (enclosing scope) maps ignored positions to -100.
        ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
        shift_logits = logits[:, :-1, :].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
        ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

        # Optional CE diagnostics
        try:
            self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
        except Exception as e:
            logger.warning(f"Failed invoking CE debug: {e}")

        # Diffusion loss
        diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
        total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

        # Logs
        try:
            prefix = "train" if model.training else "eval"
            self.log({
                f"{prefix}/ce_loss": ce_loss.detach().item(),
                f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
            })
            if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                lr_val = self.optimizer.param_groups[0].get("lr", None)
                if lr_val is not None:
                    self.log({"train/learning_rate_real": float(lr_val)})
        except Exception:
            pass

        return (total, outputs) if return_outputs else total

    def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
        """Log per-token / per-example CE statistics (gated by
        ``training_args.debug_ce_details`` and a step-interval check).
        Never raises: all failures are downgraded to warnings."""
        try:
            if not getattr(training_args, "debug_ce_details", False):
                return
            step = int(getattr(self.state, "global_step", 0) or 0)
            every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
            # Always log the first step(s); afterwards only every `every_n`.
            if not (step <= 1 or (step % every_n == 0)):
                return

            with torch.no_grad():
                vocab = shift_logits.size(-1)
                per_token_loss = F.cross_entropy(
                    shift_logits.view(-1, vocab),
                    ce_labels.view(-1),
                    reduction="none",
                    ignore_index=-100,
                ).view_as(ce_labels)

                valid_mask = ce_labels.ne(-100)
                num_valid = int(valid_mask.sum().item())
                avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                per_ex_avgs = []
                max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                B = ce_labels.size(0)
                for b in range(min(B, max_examples)):
                    vb = valid_mask[b]
                    if int(vb.sum().item()) > 0:
                        per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                    else:
                        # No valid tokens in this example; x==x filters NaN below.
                        per_ex_avgs.append(float("nan"))
                logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
        except Exception as e:
            logger.warning(f"CE detailed debug failed: {e}")

    # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------

    def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
        """Save LoRA adapters plus full diffusion-head/connector weights.

        NOTE(review): this *replaces* the base ``Trainer._save`` rather than
        extending it, so the usual full-model/processor files are not written
        by this override — only the ``lora/`` asset tree. Any failure is
        logged and swallowed (best-effort save).
        """
        try:
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)

            # --- LLM PEFT adapters (if LoRA-wrapped) ---
            language_model = getattr(self.model.model, "language_model", None)
            if hasattr(language_model, "save_pretrained"):
                language_model.save_pretrained(lora_out)

            # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
            pred_head = getattr(self.model.model, "prediction_head", None)
            if hasattr(pred_head, "save_pretrained"):
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                pred_head.save_pretrained(ph_dir)

            # --- ALWAYS save FULL diffusion head state_dict for fallback ---
            # Written to both lora/ and lora/diffusion_head/ so inference
            # loaders can find it in either location.
            if pred_head is not None and hasattr(pred_head, "state_dict"):
                sd = pred_head.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

            # --- Connectors (plain state_dicts) ---
            ac = getattr(self.model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

            se = getattr(self.model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

        except Exception as e:
            logger.warning(f"Failed to save LoRA assets: {e}")
777
+
778
+
779
+ # ------------- Build the Trainer -------------
780
+
781
+ # Resolve which adapters to apply in samples
782
+
783
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cpu")
784
+
785
+ trainer = VibeVoiceTrainer(
786
+ model=model,
787
+ args=training_args,
788
+ train_dataset=train_dataset,
789
+ eval_dataset=eval_dataset,
790
+ data_collator=data_collator,
791
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
792
+ )
793
+
794
+ # Optional debug pre-training save
795
+ if getattr(training_args, "debug_save", False):
796
+ try:
797
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
798
+ lora_out = os.path.join(debug_dir, "lora")
799
+ os.makedirs(lora_out, exist_ok=True)
800
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
801
+ # language model adapters / base
802
+ try:
803
+ if hasattr(model.model.language_model, "save_pretrained"):
804
+ model.model.language_model.save_pretrained(lora_out)
805
+ except Exception as e_lm:
806
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
807
+ # diffusion head
808
+ try:
809
+ if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
810
+ model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
811
+ except Exception as e_head:
812
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
813
+ # NEW: full diffusion head state_dict as fallback
814
+ try:
815
+ ph = getattr(model.model, "prediction_head", None)
816
+ if ph is not None and hasattr(ph, "state_dict"):
817
+ sd = ph.state_dict()
818
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
819
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
820
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
821
+ except Exception as e:
822
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
823
+ # connectors
824
+ try:
825
+ ac_conn = getattr(model.model, "acoustic_connector", None)
826
+ if ac_conn is not None:
827
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
828
+ os.makedirs(ac_dir, exist_ok=True)
829
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
830
+ except Exception as e_ac:
831
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
832
+ try:
833
+ se_conn = getattr(model.model, "semantic_connector", None)
834
+ if se_conn is not None:
835
+ se_dir = os.path.join(lora_out, "semantic_connector")
836
+ os.makedirs(se_dir, exist_ok=True)
837
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
838
+ except Exception as e_se:
839
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
840
+ except Exception as e:
841
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
842
+
843
+ if getattr(training_args, "gradient_checkpointing", False):
844
+ try:
845
+ model.gradient_checkpointing_enable()
846
+ except Exception:
847
+ logger.warning("Failed to enable gradient checkpointing on the model.")
848
+
849
+ if training_args.do_train:
850
+ trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
851
+
852
+ lora_out = os.path.join(training_args.output_dir, "lora")
853
+ os.makedirs(lora_out, exist_ok=True)
854
+
855
+ # LLM PEFT (if any)
856
+ lm = getattr(model.model, "language_model", None)
857
+ if hasattr(lm, "save_pretrained"):
858
+ lm.save_pretrained(lora_out)
859
+
860
+ # Diffusion head PEFT (if any)
861
+ ph = getattr(model.model, "prediction_head", None)
862
+ if hasattr(ph, "save_pretrained"):
863
+ ph_dir = os.path.join(lora_out, "diffusion_head")
864
+ os.makedirs(ph_dir, exist_ok=True)
865
+ ph.save_pretrained(ph_dir)
866
+
867
+ # ALWAYS: full diffusion head state_dict fallback
868
+ try:
869
+ if ph is not None and hasattr(ph, "state_dict"):
870
+ sd = ph.state_dict()
871
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
872
+ ph_dir = os.path.join(lora_out, "diffusion_head")
873
+ os.makedirs(ph_dir, exist_ok=True)
874
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
875
+ except Exception as e:
876
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
877
+
878
+ # Connectors (if trained)
879
+ try:
880
+ ac = getattr(model.model, "acoustic_connector", None)
881
+ if ac is not None:
882
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
883
+ os.makedirs(ac_dir, exist_ok=True)
884
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
885
+ except Exception as e:
886
+ logger.warning(f"Failed to save acoustic_connector: {e}")
887
+
888
+ try:
889
+ se = getattr(model.model, "semantic_connector", None)
890
+ if se is not None:
891
+ se_dir = os.path.join(lora_out, "semantic_connector")
892
+ os.makedirs(se_dir, exist_ok=True)
893
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
894
+ except Exception as e:
895
+ logger.warning(f"Failed to save semantic_connector: {e}")
896
+
897
+ if training_args.do_eval and eval_dataset is not None:
898
+ trainer.evaluate()
899
+
900
+
901
+ if __name__ == "__main__":
902
+ main()
VibeVoice-finetuning/src/finetune_vibevoice_lora0.py ADDED
@@ -0,0 +1,984 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import os
3
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
4
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
5
+
6
+ import logging
7
+ import os
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from datasets import load_dataset, DatasetDict, VerificationMode
15
+
16
+ from transformers import (
17
+ HfArgumentParser,
18
+ Trainer,
19
+ set_seed,
20
+ TrainerCallback,
21
+ )
22
+ from transformers import TrainingArguments as HfTrainingArguments
23
+
24
+ from peft import LoraConfig, get_peft_model, TaskType
25
+
26
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
27
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
28
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
29
+
30
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ # ================== SAMPLE CALLBACK UTILS ==================
35
+
36
+ import copy
37
+ import torch
38
+ from transformers import TrainerCallback
39
+
40
class EmaCallback(TrainerCallback):
    """Keep an exponential moving average (EMA) of one submodule's weights.

    The EMA "shadow" copy is updated after every optimizer step.  Around
    evaluation/saving the EMA weights are swapped into the live model so the
    smoothed weights are what gets measured/persisted, then restored.

    Args:
        attr_path: dotted path of the tracked submodule under the Trainer's
            model, e.g. ``"model.prediction_head"``.
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt).
        device: device on which the shadow copy lives.

    NOTE(review): ``transformers.TrainerCallback`` has no
    ``on_evaluate_end``/``on_save_end`` hooks, so in the original code the
    EMA weights swapped in by ``on_evaluate``/``on_save`` were *never*
    swapped back — training silently continued on EMA weights.  Fix:
    ``on_step_begin`` now restores the raw weights before every training
    step, and ``_swap_in_ema`` is guarded against double invocation so it
    cannot overwrite the saved raw weights.  The dead ``*_end`` methods are
    kept for backward compatibility.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None   # EMA copy of the tracked module's state_dict
        self._orig = None    # raw (non-EMA) weights while EMA is swapped in

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_begin(self, args, state, control, model=None, **kwargs):
        # Safety net: never run a gradient step on EMA weights.  This undoes
        # any swap-in left dangling by on_evaluate/on_save (see class note).
        self._swap_back(model)

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        head = self._get_module(model)
        with torch.no_grad():
            # shadow = decay * shadow + (1 - decay) * current
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        # Idempotent: no-op when there is no shadow yet or EMA is already
        # active (re-entering would clobber the saved raw weights).
        if self.shadow is None or self._orig is not None:
            return
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # NOTE(review): transformers fires this *after* evaluation, so the
        # swap cannot influence eval metrics; kept for compatibility.  The
        # raw weights are restored by on_step_begin before training resumes.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # Not a real TrainerCallback hook — never invoked by the Trainer.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # Fires after a checkpoint is written; swap restored by on_step_begin.
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # Not a real TrainerCallback hook — never invoked by the Trainer.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # Final state: leave the EMA weights in the model so the last
        # persisted checkpoint is the smoothed one.
        self._swap_in_ema(model)
100
+
101
+
102
@dataclass
class ModelArguments:
    """CLI arguments selecting the base model/processor and which sub-modules to train."""
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    # Cache directory forwarded to datasets/model loading.
    cache_dir: Optional[str] = field(default=None)
    # Audio tokenizers are frozen by default; only adapters/heads get gradients.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # LoRA hyperparameters shared by the LLM (and optionally the diffusion head).
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
127
+
128
@dataclass
class DataArguments:
    """CLI arguments describing the dataset source, column names, and splits."""
    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names inside the dataset rows.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of train split carved out for eval when no eval split exists.
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    # Max tokenized sequence length passed to the collator (None = no limit).
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
146
+
147
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with loss weighting and debug switches."""
    # Batch multiplier for the diffusion objective — presumably the number of
    # noise/timestep draws per latent; TODO confirm against the model forward.
    ddpm_batch_mul: int = field(default=1)
    # Relative weights of the two loss terms combined in compute_loss.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Verbose per-token CE diagnostics (see VibeVoiceTrainer._debug_ce).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
164
+
165
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Construct the PEFT LoRA config for the LLM blocks from CLI arguments."""
    module_names = [name.strip() for name in args.lora_target_modules.split(",")]
    module_names = [name for name in module_names if name]
    return LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        target_modules=module_names,
    )
175
+
176
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Construct the PEFT LoRA config for the diffusion head's linear layers."""
    head_targets = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        task_type=TaskType.FEATURE_EXTRACTION,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        target_modules=head_targets,
    )
186
+
187
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Build next-token CE labels, masking padded and acoustic positions.

    Labels are shifted left by one; a position is kept only when it is
    attended (attention_mask == 1) and not an acoustic token. Every other
    position is set to ``pad_id`` so CrossEntropyLoss ignores it.
    """
    shifted = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        attended = attention_mask[:, 1:].contiguous().eq(1)
    else:
        attended = torch.ones_like(shifted, dtype=torch.bool)
    acoustic_next = acoustic_input_mask[:, 1:].contiguous()
    masked = shifted.clone()
    masked[~(attended & ~acoustic_next)] = pad_id
    return masked
195
+
196
+ def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
197
+ try:
198
+ acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
199
+ if acoustic is None or not hasattr(acoustic, "encode"):
200
+ logger_.warning("No acoustic_tokenizer.encode() found to patch.")
201
+ return
202
+ base_encode = acoustic.encode
203
+ def encode_wrapped(*args, **kwargs):
204
+ out = base_encode(*args, **kwargs)
205
+ try:
206
+ _ = out[0][0]
207
+ return out
208
+ except Exception:
209
+ pass
210
+ if isinstance(out, dict):
211
+ for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
212
+ if k in out:
213
+ return [[out[k]]]
214
+ if len(out) > 0:
215
+ return [[next(iter(out.values()))]]
216
+ for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
217
+ if hasattr(out, attr):
218
+ return [[getattr(out, attr)]]
219
+ try:
220
+ if isinstance(out, torch.Tensor):
221
+ return [[out]]
222
+ except Exception:
223
+ pass
224
+ return [[out]]
225
+ acoustic.encode = encode_wrapped
226
+ logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
227
+ except Exception as e:
228
+ logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
229
+
230
+ def main() -> None:
231
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
232
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
233
+
234
+ logging.basicConfig(
235
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
236
+ datefmt="%m/%d/%Y %H:%M:%S",
237
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
238
+ )
239
+ logger.info("Training/evaluation parameters %s", training_args)
240
+ set_seed(training_args.seed)
241
+
242
+ # Configure gradient clipping
243
+ if not getattr(training_args, "gradient_clipping", False):
244
+ if hasattr(training_args, "max_grad_norm"):
245
+ training_args.max_grad_norm = 0.0
246
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
247
+ else:
248
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
249
+ training_args.max_grad_norm = 1.0
250
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
251
+
252
+ # Load processor
253
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
254
+ if processor_path is None:
255
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
256
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
257
+
258
+ # Required special tokens
259
+ tok = processor.tokenizer
260
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
261
+ if not hasattr(tok, required) or getattr(tok, required) is None:
262
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
263
+
264
+ # Load model
265
+ if model_args.model_name_or_path is None:
266
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
267
+ dtype = torch.float32
268
+ if training_args.bf16:
269
+ dtype = torch.bfloat16
270
+ elif getattr(training_args, "fp16", False):
271
+ dtype = torch.float16
272
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
273
+ model_args.model_name_or_path,
274
+ torch_dtype=dtype, device_map={"": 0},
275
+ )
276
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
277
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
278
+
279
+ # Diagnostics: LM head tie
280
+ try:
281
+ in_emb_mod = model.get_input_embeddings()
282
+ out_emb_mod = model.get_output_embeddings()
283
+ in_w = getattr(in_emb_mod, "weight", None)
284
+ out_w = getattr(out_emb_mod, "weight", None)
285
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
286
+ values_equal = False
287
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
288
+ try:
289
+ values_equal = bool(torch.allclose(in_w, out_w))
290
+ except Exception:
291
+ values_equal = False
292
+ try:
293
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
294
+ except Exception:
295
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
296
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
297
+ if out_w is not None:
298
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
299
+ except Exception as e:
300
+ logger.warning(f"LM head tie diagnostics failed: {e}")
301
+
302
+ # Hard-tie LM head
303
+ try:
304
+ emb_module = model.get_input_embeddings()
305
+ head_module = model.get_output_embeddings()
306
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
307
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
308
+ with torch.no_grad():
309
+ head_module.weight = emb_module.weight
310
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
311
+ except Exception as e:
312
+ logger.warning(f"Force-tie of LM head failed: {e}")
313
+
314
+ # Validate special IDs (info logs only)
315
+ try:
316
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
317
+ try:
318
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
319
+ except Exception:
320
+ vocab_size = 0
321
+ in_emb_mod = model.get_input_embeddings()
322
+ out_emb_mod = model.get_output_embeddings()
323
+ in_w = getattr(in_emb_mod, "weight", None)
324
+ out_w = getattr(out_emb_mod, "weight", None)
325
+ for name in special_names:
326
+ val = getattr(tok, name, None)
327
+ exists = (val is not None)
328
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
329
+ equal_row = None
330
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
331
+ try:
332
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
333
+ except Exception:
334
+ equal_row = False
335
+ decoded_str = None
336
+ if exists and isinstance(val, int):
337
+ try:
338
+ decoded_str = tok.decode([val])
339
+ except Exception:
340
+ try:
341
+ decoded_str = tok.convert_ids_to_tokens(val)
342
+ except Exception:
343
+ decoded_str = "<decode_failed>"
344
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
345
+ except Exception as e:
346
+ logger.warning(f"Special token ID/row validation failed: {e}")
347
+
348
+ # Quick tokenizer diagnostics (optional)
349
+ try:
350
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
351
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
352
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
353
+ # tiny CE smoke test
354
+ with torch.no_grad():
355
+ simple_text = "The cat sat on the mat."
356
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
357
+ simple_mask = torch.ones_like(simple_ids)
358
+ x = model.get_input_embeddings()(simple_ids)
359
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
360
+ logits = model.lm_head(outputs.last_hidden_state)
361
+ shift_logits = logits[:, :-1, :].contiguous()
362
+ shift_labels = simple_ids[:, 1:].contiguous()
363
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
364
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
365
+ except Exception as e:
366
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
367
+
368
+ # Disable cache during training
369
+ if hasattr(model.config, "use_cache") and training_args.do_train:
370
+ model.config.use_cache = False
371
+
372
+ # Freeze tokenizers
373
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
374
+ for p in model.model.acoustic_tokenizer.parameters():
375
+ p.requires_grad = False
376
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
377
+ for p in model.model.semantic_tokenizer.parameters():
378
+ p.requires_grad = False
379
+
380
+ # LoRA wrap LLM (optional)
381
+ lora_cfg = build_lora_config(model_args)
382
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
383
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
384
+ if not skip_lm_lora:
385
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
386
+ else:
387
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
388
+
389
+ try:
390
+ model.tie_weights()
391
+ except Exception:
392
+ pass
393
+
394
+ # Freeze all then enable trainable subsets
395
+ for _, p in model.named_parameters():
396
+ p.requires_grad = False
397
+
398
+ try:
399
+ for n, p in model.model.language_model.named_parameters():
400
+ if "lora_A" in n or "lora_B" in n:
401
+ p.requires_grad = True
402
+ except Exception:
403
+ logger.warning("Could not re-enable LoRA params on language_model.")
404
+
405
+ # Diffusion head LoRA wrapping (optional)
406
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
407
class _HeadForwardShim(nn.Module):
    # Adapter that gives the diffusion head a plain 3-positional-argument
    # forward(noisy_images, timesteps, condition) so PEFT can wrap it.
    def __init__(self, base: nn.Module): super().__init__(); self.base = base
    def forward(self, *args, **kwargs):
        # Accept both positional and keyword calling conventions and
        # forward the three expected tensors to the wrapped head.
        if len(args) >= 3:
            noisy_images, timesteps, condition = args[:3]
        else:
            noisy_images = kwargs.get("noisy_images")
            timesteps = kwargs.get("timesteps")
            condition = kwargs.get("condition")
        return self.base(noisy_images, timesteps, condition)
417
+ try:
418
+ shim = _HeadForwardShim(model.model.prediction_head)
419
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
420
+ for n, p in model.model.prediction_head.named_parameters():
421
+ if "lora_A" in n or "lora_B" in n:
422
+ p.requires_grad = True
423
+ except Exception as e:
424
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
425
+
426
+ # Train full diffusion head (optional)
427
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
428
+ for p in model.model.prediction_head.parameters():
429
+ p.requires_grad = True
430
+
431
+ # Freeze diffusion head layers (optional)
432
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
433
+ head_params = list(model.model.prediction_head.named_parameters())
434
+ try:
435
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
436
+ frozen_count = 0
437
+ for i, (name, param) in enumerate(head_params):
438
+ if i in indices_to_freeze:
439
+ param.requires_grad = False
440
+ frozen_count += 1
441
+ logger.info(f"Froze layer [{i}]: {name}")
442
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
443
+ except Exception as e:
444
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
445
+ raise
446
+
447
+ # Connectors
448
+ if getattr(model_args, "train_connectors", False):
449
+ if hasattr(model.model, "acoustic_connector"):
450
+ for p in model.model.acoustic_connector.parameters():
451
+ p.requires_grad = True
452
+ if hasattr(model.model, "semantic_connector"):
453
+ for p in model.model.semantic_connector.parameters():
454
+ p.requires_grad = True
455
+ else:
456
+ if hasattr(model.model, "acoustic_connector"):
457
+ for p in model.model.acoustic_connector.parameters():
458
+ p.requires_grad = False
459
+ if hasattr(model.model, "semantic_connector"):
460
+ for p in model.model.semantic_connector.parameters():
461
+ p.requires_grad = False
462
+
463
+ # Freeze embedding + head
464
+ try:
465
+ emb = model.get_input_embeddings()
466
+ if hasattr(emb, "weight"):
467
+ emb.weight.requires_grad_(False)
468
+ head = model.get_output_embeddings()
469
+ if head is not None and hasattr(head, "weight"):
470
+ head.weight.requires_grad_(False)
471
+ except Exception:
472
+ pass
473
+
474
+ # Diagnostics
475
+ def _sum_params(named_iter):
476
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
477
+ try:
478
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
479
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
480
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
481
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
482
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
483
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
484
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
485
+ except Exception:
486
+ pass
487
+
488
+ # Preprocessed data classes
489
class PreprocessedBatchDataset:
    """Dataset over batches that were pre-collated and stored via torch.save."""

    def __init__(self, preprocessed_file: str):
        # Load to CPU; the Trainer moves tensors to the right device later.
        self.data = torch.load(preprocessed_file, map_location='cpu')
        logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Return a shallow copy so callers cannot mutate the cached batch dict.
        return dict(self.data[idx])
506
+
507
class PreprocessedBatchSubset:
    """Index-remapped view over a subset of a PreprocessedBatchDataset."""

    def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
        self.dataset = dataset
        self.indices = indices

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        # Translate the subset index into the underlying dataset's index.
        return self.dataset[self.indices[idx]]
518
+
519
class PreprocessedBatchCollator:
    """Collate pre-batched items: concatenate tensors along dim 0, keep first non-tensor."""

    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        collated = {}
        # Keys are taken from the first item; None values are dropped per key.
        for key in batch[0]:
            values = [item[key] for item in batch if item[key] is not None]
            if values and isinstance(values[0], torch.Tensor):
                collated[key] = torch.cat(values, dim=0)
            else:
                collated[key] = values[0] if values else None
        return collated
531
+
532
+ # Datasets
533
+ preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
534
+ preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
535
+
536
+ if os.path.exists(preprocessed_file):
537
+ logger.info(f"Loading preprocessed data from {preprocessed_file}")
538
+ preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
539
+
540
+ train_dataset = preprocessed_data
541
+ eval_dataset = None
542
+
543
+ if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
544
+ num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
545
+ num_train = len(preprocessed_data) - num_eval
546
+ indices = list(range(len(preprocessed_data)))
547
+ import random
548
+ random.Random(training_args.seed).shuffle(indices)
549
+ train_indices = indices[:num_train]
550
+ eval_indices = indices[num_train:]
551
+ train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
552
+ eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
553
+ else:
554
+ logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
555
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
556
+ if data_args.train_jsonl is not None:
557
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
558
+ if data_args.validation_jsonl is not None:
559
+ data_files["validation"] = data_args.validation_jsonl
560
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
561
+ else:
562
+ if data_args.dataset_name is None:
563
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
564
+ raw = load_dataset(
565
+ data_args.dataset_name,
566
+ data_args.dataset_config_name,
567
+ verification_mode=verification_mode,
568
+ cache_dir=model_args.cache_dir,
569
+ )
570
+ train_ds = raw[data_args.train_split_name]
571
+ eval_ds = None
572
+ if training_args.do_eval:
573
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
574
+ eval_ds = raw[data_args.eval_split_name]
575
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
576
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
577
+ train_ds, eval_ds = split["train"], split["test"]
578
+
579
+ train_dataset = VibeVoiceDataset(
580
+ train_ds,
581
+ text_column=data_args.text_column_name,
582
+ audio_column=data_args.audio_column_name,
583
+ voice_prompts_column=data_args.voice_prompts_column_name,
584
+ )
585
+ eval_dataset = None
586
+ if eval_ds is not None:
587
+ eval_dataset = VibeVoiceDataset(
588
+ eval_ds,
589
+ text_column=data_args.text_column_name,
590
+ audio_column=data_args.audio_column_name,
591
+ voice_prompts_column=data_args.voice_prompts_column_name,
592
+ )
593
+
594
+ # Ratios/dims from processor+model
595
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
596
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
597
+ if semantic_dim is None:
598
+ try:
599
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
600
+ except Exception:
601
+ semantic_dim = 128
602
+
603
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
604
+
605
+ if os.path.exists(preprocessed_file):
606
+ data_collator = PreprocessedBatchCollator()
607
+ else:
608
+ data_collator = VibeVoiceCollator(
609
+ processor=processor,
610
+ max_length=data_args.max_length,
611
+ speech_compress_ratio=speech_compress_ratio,
612
+ semantic_vae_dim=semantic_dim,
613
+ compute_semantics=compute_semantics_flag,
614
+ debug_checks=False,
615
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
616
+ )
617
+
618
class LoRADebugCallback(TrainerCallback):
    """Logs whether LoRA adapter weights exist, are trainable, and actually move.

    Tracks the L2 norm of every lora_A/lora_B parameter. At each logging
    step it reports how many norms changed since the last check and how many
    lora_B tensors are still all-zero (PEFT initializes lora_B to zero, so a
    persistent zero norm suggests the adapters are not learning).
    """

    def __init__(self, log_every_n_steps: int = 50):
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        self.prev_param_norms: Dict[str, float] = {}  # param name -> last observed L2 norm
        self.lora_param_names: List[str] = []  # names of discovered LoRA params

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        """Snapshot initial norms of all LoRA params and sanity-check their setup."""
        try:
            if model is None:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
            for n in self.lora_param_names:
                p = named[n]
                self.prev_param_norms[n] = float(p.data.norm().item())
            total = len(self.lora_param_names)
            req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
            num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            # Count lora_B tensors still at their zero init.
            zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        """Every N steps (and at step 1), count LoRA params whose norm moved."""
        try:
            if model is None or len(self.lora_param_names) == 0:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Always log at step 1 to confirm training started; then every N steps.
            if step % self.log_every_n_steps != 0 and step != 1:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            changed_A = 0
            changed_B = 0
            zero_B = 0
            eps = 1e-12  # norm-change threshold below which a param counts as unchanged
            for n in self.lora_param_names:
                p = named.get(n, None)
                if p is None:
                    continue
                prev = self.prev_param_norms.get(n, 0.0)
                curr = float(p.data.norm().item())
                if "lora_A" in n and abs(curr - prev) > eps:
                    changed_A += 1
                if "lora_B" in n:
                    if abs(curr - prev) > eps:
                        changed_B += 1
                    if curr == 0.0:
                        zero_B += 1
                self.prev_param_norms[n] = curr
            total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
677
+
678
+ class VibeVoiceTrainer(Trainer):
679
def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
    """Combined loss for one batch.

    total = ce_loss_weight * CE(text tokens) + diffusion_loss_weight * diffusion_loss,
    where CE is computed only over attended, non-acoustic positions
    (see mask_for_ce) and the diffusion loss comes from the model output.
    """
    # Input ids double as CE labels (next-token prediction after shifting).
    labels = inputs.get("input_ids")
    attention_mask = inputs.get("attention_mask")
    acoustic_input_mask = inputs.get("acoustic_input_mask")

    # Ensure semantic tensors exist and have correct dtype/device
    sem = inputs.get("speech_semantic_tensors", None)
    try:
        target_dtype = next(model.model.semantic_connector.parameters()).dtype
    except Exception:
        # Fallback: match the embedding dtype if there is no semantic connector.
        target_dtype = model.get_input_embeddings().weight.dtype

    if sem is None:
        sm = inputs.get("speech_masks")
        if sm is not None:
            # No semantic latents provided: substitute zeros with the same
            # (batch, frames) layout as speech_masks and the model's VAE dim.
            zeros = torch.zeros(
                sm.size(0), sm.size(1),
                getattr(model.config, "semantic_vae_dim", 128),
                dtype=target_dtype,
                device=sm.device,
            )
            inputs["speech_semantic_tensors"] = zeros
    else:
        if isinstance(sem, torch.Tensor):
            inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

    outputs = model(
        input_ids=inputs.get("input_ids"),
        attention_mask=attention_mask,
        speech_tensors=inputs.get("speech_tensors"),
        speech_masks=inputs.get("speech_masks"),
        speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
        acoustic_input_mask=acoustic_input_mask,
        acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
        speeches_loss_input=inputs.get("speeches_loss_input"),
        ddpm_batch_mul=training_args.ddpm_batch_mul,
    )

    # Invariants: token/latent selection equality across views (warn, don't assert)
    try:
        al_mask = inputs.get("acoustic_loss_mask")
        sp_masks = inputs.get("speech_masks")
        sp_loss_sel = inputs.get("speeches_loss_input")
        num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
        num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
        num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
        num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
        self.log({
            "debug/num_tok_total": float(num_tok_total),
            "debug/num_tok_loss": float(num_tok_loss),
            "debug/num_lat_total": float(num_lat_total),
            "debug/num_lat_loss": float(num_lat_loss),
        })
        if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
            # Token-side and latent-side loss selections should pick the same
            # number of positions; a mismatch hints at a collator bug.
            if num_tok_loss != num_lat_loss:
                logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
    except Exception:
        pass

    # CE Loss
    logits = outputs.logits
    ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
    # Drop the last-position logits to align with the shifted labels.
    shift_logits = logits[:, :-1, :].contiguous()
    loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
    ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

    # Optional CE diagnostics
    try:
        self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
    except Exception as e:
        logger.warning(f"Failed invoking CE debug: {e}")

    # Diffusion loss
    diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
    total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

    # Logs
    try:
        prefix = "train" if model.training else "eval"
        self.log({
            f"{prefix}/ce_loss": ce_loss.detach().item(),
            f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
        })
        # Log the actual optimizer LR (scheduler-adjusted), not the configured one.
        if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
            lr_val = self.optimizer.param_groups[0].get("lr", None)
            if lr_val is not None:
                self.log({"train/learning_rate_real": float(lr_val)})
    except Exception:
        pass

    return (total, outputs) if return_outputs else total
770
+
771
def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
    """Log per-token and per-example CE statistics at a throttled cadence.

    Active only when --debug_ce_details is set; runs at steps <= 1 and every
    --debug_ce_every_n_steps thereafter. Never raises: all failures are
    downgraded to warnings so diagnostics cannot break training.
    """
    try:
        if not getattr(training_args, "debug_ce_details", False):
            return
        step = int(getattr(self.state, "global_step", 0) or 0)
        every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
        if not (step <= 1 or (step % every_n == 0)):
            return

        with torch.no_grad():
            vocab = shift_logits.size(-1)
            # Unreduced CE so we can aggregate per token/example ourselves.
            per_token_loss = F.cross_entropy(
                shift_logits.view(-1, vocab),
                ce_labels.view(-1),
                reduction="none",
                ignore_index=-100,
            ).view_as(ce_labels)

            # Positions masked to -100 are excluded from all statistics.
            valid_mask = ce_labels.ne(-100)
            num_valid = int(valid_mask.sum().item())
            avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

            per_ex_avgs = []
            max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
            B = ce_labels.size(0)
            for b in range(min(B, max_examples)):
                vb = valid_mask[b]
                if int(vb.sum().item()) > 0:
                    per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                else:
                    # Example contributed no valid tokens; report NaN.
                    per_ex_avgs.append(float("nan"))
            # x==x filters NaN (NaN != NaN) so it renders as None in the log.
            logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
    except Exception as e:
        logger.warning(f"CE detailed debug failed: {e}")
805
+
806
+ # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------
807
+
808
+
809
+ def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
810
+ try:
811
+ target_dir = output_dir or self.args.output_dir
812
+ lora_out = os.path.join(target_dir, "lora")
813
+ os.makedirs(lora_out, exist_ok=True)
814
+
815
+ # --- LLM PEFT adapters (if LoRA-wrapped) ---
816
+ language_model = getattr(self.model.model, "language_model", None)
817
+ if hasattr(language_model, "save_pretrained"):
818
+ language_model.save_pretrained(lora_out)
819
+
820
+ # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
821
+ pred_head = getattr(self.model.model, "prediction_head", None)
822
+ if hasattr(pred_head, "save_pretrained"):
823
+ ph_dir = os.path.join(lora_out, "diffusion_head")
824
+ os.makedirs(ph_dir, exist_ok=True)
825
+ pred_head.save_pretrained(ph_dir)
826
+
827
+ # --- ALWAYS save FULL diffusion head state_dict for fallback ---
828
+ if pred_head is not None and hasattr(pred_head, "state_dict"):
829
+ sd = pred_head.state_dict()
830
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
831
+ ph_dir = os.path.join(lora_out, "diffusion_head")
832
+ os.makedirs(ph_dir, exist_ok=True)
833
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
834
+
835
+ # --- Connectors (plain state_dicts) ---
836
+ ac = getattr(self.model.model, "acoustic_connector", None)
837
+ if ac is not None:
838
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
839
+ os.makedirs(ac_dir, exist_ok=True)
840
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
841
+
842
+ se = getattr(self.model.model, "semantic_connector", None)
843
+ if se is not None:
844
+ se_dir = os.path.join(lora_out, "semantic_connector")
845
+ os.makedirs(se_dir, exist_ok=True)
846
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
847
+
848
+ except Exception as e:
849
+ logger.warning(f"Failed to save LoRA assets: {e}")
850
+
851
+
852
+ # ------------- Build the Trainer -------------
853
+
854
+ # Resolve which adapters to apply in samples
855
+
856
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
857
+
858
+ # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
859
+ # This prevents 'ValueError: Attempting to unscale FP16 gradients'
860
+ if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
861
+ print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
862
+ for name, param in model.named_parameters():
863
+ if param.requires_grad:
864
+ param.data = param.data.to(torch.float32)
865
+ # ---------------------------------------------------
866
+
867
+ trainer = VibeVoiceTrainer(
868
+ model=model,
869
+ args=training_args,
870
+ train_dataset=train_dataset,
871
+ eval_dataset=eval_dataset,
872
+ data_collator=data_collator,
873
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
874
+ )
875
+
876
+ # Optional debug pre-training save
877
+ if getattr(training_args, "debug_save", False):
878
+ try:
879
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
880
+ lora_out = os.path.join(debug_dir, "lora")
881
+ os.makedirs(lora_out, exist_ok=True)
882
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
883
+ # language model adapters / base
884
+ try:
885
+ if hasattr(model.model.language_model, "save_pretrained"):
886
+ model.model.language_model.save_pretrained(lora_out)
887
+ except Exception as e_lm:
888
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
889
+ # diffusion head
890
+ try:
891
+ if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
892
+ model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
893
+ except Exception as e_head:
894
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
895
+ # NEW: full diffusion head state_dict as fallback
896
+ try:
897
+ ph = getattr(model.model, "prediction_head", None)
898
+ if ph is not None and hasattr(ph, "state_dict"):
899
+ sd = ph.state_dict()
900
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
901
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
902
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
903
+ except Exception as e:
904
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
905
+ # connectors
906
+ try:
907
+ ac_conn = getattr(model.model, "acoustic_connector", None)
908
+ if ac_conn is not None:
909
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
910
+ os.makedirs(ac_dir, exist_ok=True)
911
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
912
+ except Exception as e_ac:
913
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
914
+ try:
915
+ se_conn = getattr(model.model, "semantic_connector", None)
916
+ if se_conn is not None:
917
+ se_dir = os.path.join(lora_out, "semantic_connector")
918
+ os.makedirs(se_dir, exist_ok=True)
919
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
920
+ except Exception as e_se:
921
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
922
+ except Exception as e:
923
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
924
+
925
+ if getattr(training_args, "gradient_checkpointing", False):
926
+ try:
927
+ model.gradient_checkpointing_enable()
928
+ except Exception:
929
+ logger.warning("Failed to enable gradient checkpointing on the model.")
930
+
931
+ if training_args.do_train:
932
+ trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
933
+
934
+ lora_out = os.path.join(training_args.output_dir, "lora")
935
+ os.makedirs(lora_out, exist_ok=True)
936
+
937
+ # LLM PEFT (if any)
938
+ lm = getattr(model.model, "language_model", None)
939
+ if hasattr(lm, "save_pretrained"):
940
+ lm.save_pretrained(lora_out)
941
+
942
+ # Diffusion head PEFT (if any)
943
+ ph = getattr(model.model, "prediction_head", None)
944
+ if hasattr(ph, "save_pretrained"):
945
+ ph_dir = os.path.join(lora_out, "diffusion_head")
946
+ os.makedirs(ph_dir, exist_ok=True)
947
+ ph.save_pretrained(ph_dir)
948
+
949
+ # ALWAYS: full diffusion head state_dict fallback
950
+ try:
951
+ if ph is not None and hasattr(ph, "state_dict"):
952
+ sd = ph.state_dict()
953
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
954
+ ph_dir = os.path.join(lora_out, "diffusion_head")
955
+ os.makedirs(ph_dir, exist_ok=True)
956
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
957
+ except Exception as e:
958
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
959
+
960
+ # Connectors (if trained)
961
+ try:
962
+ ac = getattr(model.model, "acoustic_connector", None)
963
+ if ac is not None:
964
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
965
+ os.makedirs(ac_dir, exist_ok=True)
966
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
967
+ except Exception as e:
968
+ logger.warning(f"Failed to save acoustic_connector: {e}")
969
+
970
+ try:
971
+ se = getattr(model.model, "semantic_connector", None)
972
+ if se is not None:
973
+ se_dir = os.path.join(lora_out, "semantic_connector")
974
+ os.makedirs(se_dir, exist_ok=True)
975
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
976
+ except Exception as e:
977
+ logger.warning(f"Failed to save semantic_connector: {e}")
978
+
979
+ if training_args.do_eval and eval_dataset is not None:
980
+ trainer.evaluate()
981
+
982
+
983
+ if __name__ == "__main__":
984
+ main()
VibeVoice-finetuning/src/finetune_vibevoice_lora00.py ADDED
@@ -0,0 +1,1005 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import os
3
+ # متغیر زیر کامنت شده است تا سیستم بتواند تمام GPUها را در حالت DDP ببیند
4
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
5
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
6
+
7
+ import logging
8
+ import copy
9
+ from dataclasses import dataclass, field
10
+ from typing import Any, Dict, List, Optional, Tuple
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from datasets import load_dataset, DatasetDict, VerificationMode
16
+
17
+ from transformers import (
18
+ HfArgumentParser,
19
+ Trainer,
20
+ set_seed,
21
+ TrainerCallback,
22
+ )
23
+ from transformers import TrainingArguments as HfTrainingArguments
24
+
25
+ from peft import LoraConfig, get_peft_model, TaskType
26
+
27
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
28
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
29
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
30
+
31
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+ # ================== SAMPLE CALLBACK UTILS ==================
36
+
37
+ class EmaCallback(TrainerCallback):
38
+ def __init__(self, attr_path="model.prediction_head", decay=0.999):
39
+ """
40
+ attr_path: where the head lives under self.model
41
+ decay: EMA decay
42
+ """
43
+ self.attr_path = attr_path
44
+ self.decay = float(decay)
45
+ self.shadow = None
46
+ self._orig = None # store non-EMA weights when we swap
47
+
48
+ def _get_module(self, model):
49
+ # رفع مشکل DDP: دسترسی به مدل اصلی در صورت Wrap شدن با DistributedDataParallel
50
+ mod = model.module if hasattr(model, "module") else model
51
+ for name in self.attr_path.split('.'):
52
+ mod = getattr(mod, name)
53
+ return mod
54
+
55
+ def on_train_begin(self, args, state, control, model=None, **kwargs):
56
+ head = self._get_module(model)
57
+ # استفاده از دیوایس داینامیک برای پشتیبانی از چند گرافیک
58
+ self.shadow = {k: p.detach().clone()
59
+ for k, p in head.state_dict().items()}
60
+
61
+ def on_step_end(self, args, state, control, model=None, **kwargs):
62
+ if self.shadow is None: return
63
+ head = self._get_module(model)
64
+ with torch.no_grad():
65
+ for k, v in head.state_dict().items():
66
+ target_device = self.shadow[k].device
67
+ self.shadow[k].mul_(self.decay).add_(v.detach().to(target_device), alpha=(1.0 - self.decay))
68
+
69
+ # ---- Swap helpers ----
70
+ def _swap_in_ema(self, model):
71
+ head = self._get_module(model)
72
+ self._orig = copy.deepcopy(head.state_dict())
73
+ head.load_state_dict(self.shadow, strict=False)
74
+
75
+ def _swap_back(self, model):
76
+ if self._orig is None: return
77
+ head = self._get_module(model)
78
+ head.load_state_dict(self._orig, strict=False)
79
+ self._orig = None
80
+
81
+ def on_evaluate(self, args, state, control, model=None, **kwargs):
82
+ self._swap_in_ema(model)
83
+
84
+ def on_evaluate_end(self, args, state, control, model=None, **kwargs):
85
+ self._swap_back(model)
86
+
87
+ def on_save(self, args, state, control, model=None, **kwargs):
88
+ self._swap_in_ema(model)
89
+
90
+ def on_save_end(self, args, state, control, model=None, **kwargs):
91
+ self._swap_back(model)
92
+
93
+ def on_train_end(self, args, state, control, model=None, **kwargs):
94
+ self._swap_in_ema(model)
95
+
96
+
97
+ @dataclass
98
+ class ModelArguments:
99
+ model_name_or_path: Optional[str] = field(
100
+ default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
101
+ )
102
+ processor_name_or_path: Optional[str] = field(
103
+ default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
104
+ )
105
+ cache_dir: Optional[str] = field(default=None)
106
+ freeze_acoustic_tokenizer: bool = field(default=True)
107
+ freeze_semantic_tokenizer: bool = field(default=True)
108
+ lora_r: int = field(default=8)
109
+ lora_alpha: int = field(default=32)
110
+ lora_dropout: float = field(default=0.05)
111
+ lora_target_modules: str = field(
112
+ default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
113
+ metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
114
+ )
115
+ lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
116
+ train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
117
+ train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
118
+ layers_to_freeze: Optional[str] = field(
119
+ default=None,
120
+ metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
121
+ )
122
+
123
+ @dataclass
124
+ class DataArguments:
125
+ dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
126
+ dataset_config_name: Optional[str] = field(default=None)
127
+ train_split_name: str = field(default="train")
128
+ eval_split_name: Optional[str] = field(default="validation")
129
+ text_column_name: str = field(default="text")
130
+ audio_column_name: str = field(default="audio")
131
+ voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
132
+ eval_split_size: float = field(default=0.0)
133
+ ignore_verifications: bool = field(default=False)
134
+ max_length: Optional[int] = field(default=None)
135
+ train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
136
+ validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
137
+ voice_prompt_drop_rate: float = field(
138
+ default=0.0,
139
+ metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
140
+ )
141
+
142
+ @dataclass
143
+ class CustomTrainingArguments(HfTrainingArguments):
144
+ ddpm_batch_mul: int = field(default=1)
145
+ ce_loss_weight: float = field(default=1.0)
146
+ diffusion_loss_weight: float = field(default=1.0)
147
+ debug_ce_details: bool = field(default=False)
148
+ debug_ce_topk: int = field(default=5)
149
+ debug_ce_max_examples: int = field(default=1)
150
+ debug_ce_every_n_steps: int = field(default=200)
151
+ gradient_clipping: bool = field(
152
+ default=False,
153
+ metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
154
+ )
155
+ debug_save: bool = field(
156
+ default=False,
157
+ metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
158
+ )
159
+
160
+ def build_lora_config(args: ModelArguments) -> LoraConfig:
161
+ target_modules = [s.strip() for s in args.lora_target_modules.split(",") if s.strip()]
162
+ return LoraConfig(
163
+ r=args.lora_r,
164
+ lora_alpha=args.lora_alpha,
165
+ lora_dropout=args.lora_dropout,
166
+ bias="none",
167
+ task_type=TaskType.CAUSAL_LM,
168
+ target_modules=target_modules,
169
+ )
170
+
171
+ def build_head_lora_config(args: ModelArguments) -> LoraConfig:
172
+ target_modules = ["noisy_images_proj","cond_proj","gate_proj","up_proj","down_proj","linear"]
173
+ return LoraConfig(
174
+ r=args.lora_r,
175
+ lora_alpha=args.lora_alpha,
176
+ lora_dropout=args.lora_dropout,
177
+ bias="none",
178
+ task_type=TaskType.FEATURE_EXTRACTION,
179
+ target_modules=target_modules,
180
+ )
181
+
182
+ def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
183
+ shifted = labels[:, 1:].contiguous()
184
+ base_mask = attention_mask[:, 1:].contiguous().eq(1) if (attention_mask is not None and attention_mask.numel() > 0) else torch.ones_like(shifted, dtype=torch.bool)
185
+ label_is_acoustic = acoustic_input_mask[:, 1:].contiguous()
186
+ final_mask = base_mask & (~label_is_acoustic)
187
+ out = shifted.clone()
188
+ out[~final_mask] = pad_id
189
+ return out
190
+
191
+ def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
192
+ try:
193
+ # هندل کردن دسترسی به مدل در حالت DDP
194
+ actual_model = model_obj.module if hasattr(model_obj, "module") else model_obj
195
+ acoustic = getattr(getattr(actual_model, "model", actual_model), "acoustic_tokenizer", None)
196
+ if acoustic is None or not hasattr(acoustic, "encode"):
197
+ logger_.warning("No acoustic_tokenizer.encode() found to patch.")
198
+ return
199
+ base_encode = acoustic.encode
200
+ def encode_wrapped(*args, **kwargs):
201
+ out = base_encode(*args, **kwargs)
202
+ try:
203
+ _ = out[0][0]
204
+ return out
205
+ except Exception:
206
+ pass
207
+ if isinstance(out, dict):
208
+ for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
209
+ if k in out:
210
+ return [[out[k]]]
211
+ if len(out) > 0:
212
+ return [[next(iter(out.values()))]]
213
+ for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
214
+ if hasattr(out, attr):
215
+ return [[getattr(out, attr)]]
216
+ try:
217
+ if isinstance(out, torch.Tensor):
218
+ return [[out]]
219
+ except Exception:
220
+ pass
221
+ return [[out]]
222
+ acoustic.encode = encode_wrapped
223
+ logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
224
+ except Exception as e:
225
+ logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
226
+
227
+ def main() -> None:
228
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
229
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
230
+
231
+ logging.basicConfig(
232
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
233
+ datefmt="%m/%d/%Y %H:%M:%S",
234
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
235
+ )
236
+ logger.info("Training/evaluation parameters %s", training_args)
237
+ set_seed(training_args.seed)
238
+
239
+ # بدست آوردن Rank گرافیک فعلی برای تخصیص صحیح در DDP
240
+ local_rank = int(os.environ.get("LOCAL_RANK", -1))
241
+ device_map = {"": local_rank} if local_rank != -1 else None
242
+
243
+ # Configure gradient clipping
244
+ if not getattr(training_args, "gradient_clipping", False):
245
+ if hasattr(training_args, "max_grad_norm"):
246
+ training_args.max_grad_norm = 0.0
247
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
248
+ else:
249
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
250
+ training_args.max_grad_norm = 1.0
251
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
252
+
253
+ # Load processor
254
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
255
+ if processor_path is None:
256
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
257
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
258
+
259
+ # Required special tokens
260
+ tok = processor.tokenizer
261
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
262
+ if not hasattr(tok, required) or getattr(tok, required) is None:
263
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
264
+
265
+ # Load model (تخصیص مدل به گرافیک‌ها با استفاده از device_map محاسبه شده)
266
+ if model_args.model_name_or_path is None:
267
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
268
+ dtype = torch.float32
269
+ if training_args.bf16:
270
+ dtype = torch.bfloat16
271
+ elif getattr(training_args, "fp16", False):
272
+ dtype = torch.float16
273
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
274
+ model_args.model_name_or_path,
275
+ torch_dtype=dtype,
276
+ device_map=device_map,
277
+ )
278
+
279
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
280
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
281
+
282
+ # Diagnostics: LM head tie
283
+ try:
284
+ in_emb_mod = model.get_input_embeddings()
285
+ out_emb_mod = model.get_output_embeddings()
286
+ in_w = getattr(in_emb_mod, "weight", None)
287
+ out_w = getattr(out_emb_mod, "weight", None)
288
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
289
+ values_equal = False
290
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
291
+ try:
292
+ values_equal = bool(torch.allclose(in_w, out_w))
293
+ except Exception:
294
+ values_equal = False
295
+ try:
296
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
297
+ except Exception:
298
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
299
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
300
+ if out_w is not None:
301
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
302
+ except Exception as e:
303
+ logger.warning(f"LM head tie diagnostics failed: {e}")
304
+
305
+ # Hard-tie LM head
306
+ try:
307
+ emb_module = model.get_input_embeddings()
308
+ head_module = model.get_output_embeddings()
309
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
310
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
311
+ with torch.no_grad():
312
+ head_module.weight = emb_module.weight
313
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
314
+ except Exception as e:
315
+ logger.warning(f"Force-tie of LM head failed: {e}")
316
+
317
+ # Validate special IDs (info logs only)
318
+ try:
319
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
320
+ try:
321
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
322
+ except Exception:
323
+ vocab_size = 0
324
+ in_emb_mod = model.get_input_embeddings()
325
+ out_emb_mod = model.get_output_embeddings()
326
+ in_w = getattr(in_emb_mod, "weight", None)
327
+ out_w = getattr(out_emb_mod, "weight", None)
328
+ for name in special_names:
329
+ val = getattr(tok, name, None)
330
+ exists = (val is not None)
331
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
332
+ equal_row = None
333
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
334
+ try:
335
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
336
+ except Exception:
337
+ equal_row = False
338
+ decoded_str = None
339
+ if exists and isinstance(val, int):
340
+ try:
341
+ decoded_str = tok.decode([val])
342
+ except Exception:
343
+ try:
344
+ decoded_str = tok.convert_ids_to_tokens(val)
345
+ except Exception:
346
+ decoded_str = "<decode_failed>"
347
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
348
+ except Exception as e:
349
+ logger.warning(f"Special token ID/row validation failed: {e}")
350
+
351
+ # Quick tokenizer diagnostics (optional)
352
+ try:
353
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
354
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
355
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
356
+ # tiny CE smoke test
357
+ with torch.no_grad():
358
+ simple_text = "The cat sat on the mat."
359
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
360
+ simple_mask = torch.ones_like(simple_ids)
361
+ x = model.get_input_embeddings()(simple_ids)
362
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
363
+ logits = model.lm_head(outputs.last_hidden_state)
364
+ shift_logits = logits[:, :-1, :].contiguous()
365
+ shift_labels = simple_ids[:, 1:].contiguous()
366
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
367
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
368
+ except Exception as e:
369
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
370
+
371
+ # Disable cache during training
372
+ if hasattr(model.config, "use_cache") and training_args.do_train:
373
+ model.config.use_cache = False
374
+
375
+ # Freeze tokenizers
376
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
377
+ for p in model.model.acoustic_tokenizer.parameters():
378
+ p.requires_grad = False
379
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
380
+ for p in model.model.semantic_tokenizer.parameters():
381
+ p.requires_grad = False
382
+
383
+ # LoRA wrap LLM (optional)
384
+ lora_cfg = build_lora_config(model_args)
385
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
386
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
387
+ if not skip_lm_lora:
388
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
389
+ else:
390
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
391
+
392
+ try:
393
+ model.tie_weights()
394
+ except Exception:
395
+ pass
396
+
397
+ # Freeze all then enable trainable subsets
398
+ for _, p in model.named_parameters():
399
+ p.requires_grad = False
400
+
401
+ try:
402
+ for n, p in model.model.language_model.named_parameters():
403
+ if "lora_A" in n or "lora_B" in n:
404
+ p.requires_grad = True
405
+ except Exception:
406
+ logger.warning("Could not re-enable LoRA params on language_model.")
407
+
408
+ # Diffusion head LoRA wrapping (optional)
409
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
410
+ class _HeadForwardShim(nn.Module):
411
+ def __init__(self, base: nn.Module): super().__init__(); self.base = base
412
+ def forward(self, *args, **kwargs):
413
+ if len(args) >= 3:
414
+ noisy_images, timesteps, condition = args[:3]
415
+ else:
416
+ noisy_images = kwargs.get("noisy_images")
417
+ timesteps = kwargs.get("timesteps")
418
+ condition = kwargs.get("condition")
419
+ return self.base(noisy_images, timesteps, condition)
420
+ try:
421
+ shim = _HeadForwardShim(model.model.prediction_head)
422
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
423
+ for n, p in model.model.prediction_head.named_parameters():
424
+ if "lora_A" in n or "lora_B" in n:
425
+ p.requires_grad = True
426
+ except Exception as e:
427
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
428
+
429
+ # Train full diffusion head (optional)
430
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
431
+ for p in model.model.prediction_head.parameters():
432
+ p.requires_grad = True
433
+
434
+ # Freeze diffusion head layers (optional)
435
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
436
+ head_params = list(model.model.prediction_head.named_parameters())
437
+ try:
438
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
439
+ frozen_count = 0
440
+ for i, (name, param) in enumerate(head_params):
441
+ if i in indices_to_freeze:
442
+ param.requires_grad = False
443
+ frozen_count += 1
444
+ logger.info(f"Froze layer [{i}]: {name}")
445
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
446
+ except Exception as e:
447
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
448
+ raise
449
+
450
+ # Connectors
451
+ if getattr(model_args, "train_connectors", False):
452
+ if hasattr(model.model, "acoustic_connector"):
453
+ for p in model.model.acoustic_connector.parameters():
454
+ p.requires_grad = True
455
+ if hasattr(model.model, "semantic_connector"):
456
+ for p in model.model.semantic_connector.parameters():
457
+ p.requires_grad = True
458
+ else:
459
+ if hasattr(model.model, "acoustic_connector"):
460
+ for p in model.model.acoustic_connector.parameters():
461
+ p.requires_grad = False
462
+ if hasattr(model.model, "semantic_connector"):
463
+ for p in model.model.semantic_connector.parameters():
464
+ p.requires_grad = False
465
+
466
+ # Freeze embedding + head
467
+ try:
468
+ emb = model.get_input_embeddings()
469
+ if hasattr(emb, "weight"):
470
+ emb.weight.requires_grad_(False)
471
+ head = model.get_output_embeddings()
472
+ if head is not None and hasattr(head, "weight"):
473
+ head.weight.requires_grad_(False)
474
+ except Exception:
475
+ pass
476
+
477
+ # Diagnostics
478
+ def _sum_params(named_iter):
479
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
480
+ try:
481
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
482
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
483
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
484
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
485
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
486
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
487
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
488
+ except Exception:
489
+ pass
490
+
491
+ # Preprocessed data classes
492
class PreprocessedBatchDataset:
    """Dataset over batches that were pre-collated and saved with torch.save.

    The loaded file is expected to be a sequence of dicts, each dict being one
    ready-to-use training batch (tensor values plus optional metadata).
    """

    def __init__(self, preprocessed_file: str):
        # map_location='cpu' so loading never requires (or allocates on) a GPU.
        self.data = torch.load(preprocessed_file, map_location='cpu')
        logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx) -> Dict[str, Any]:
        # Return a shallow copy so callers cannot mutate the cached batch dict.
        # The original branched on isinstance(v, torch.Tensor) but assigned the
        # same value in both branches — the copy is all that is needed.
        return dict(self.data[idx])
509
+
510
class PreprocessedBatchSubset:
    """Index-based view over a subset of a PreprocessedBatchDataset."""

    def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
        self.dataset = dataset
        self.indices = indices

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        # Translate the subset-local index into the underlying dataset index.
        return self.dataset[self.indices[idx]]
521
+
522
class PreprocessedBatchCollator:
    """Collate pre-batched items: concatenate tensor fields along dim 0,
    pass the first non-None value through for everything else."""

    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        collated = {}
        # Key set is taken from the first item, matching the original contract.
        for key in batch[0]:
            values = [item[key] for item in batch if item[key] is not None]
            if values and isinstance(values[0], torch.Tensor):
                collated[key] = torch.cat(values, dim=0)
            else:
                # Non-tensor (or all-None) field: keep the first available value.
                collated[key] = values[0] if values else None
        return collated
534
+
535
+ # Datasets
536
+ preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
537
+ preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
538
+
539
+ if os.path.exists(preprocessed_file):
540
+ logger.info(f"Loading preprocessed data from {preprocessed_file}")
541
+ preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
542
+
543
+ train_dataset = preprocessed_data
544
+ eval_dataset = None
545
+
546
+ if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
547
+ num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
548
+ num_train = len(preprocessed_data) - num_eval
549
+ indices = list(range(len(preprocessed_data)))
550
+ import random
551
+ random.Random(training_args.seed).shuffle(indices)
552
+ train_indices = indices[:num_train]
553
+ eval_indices = indices[num_train:]
554
+ train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
555
+ eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
556
+ else:
557
+ logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
558
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
559
+ if data_args.train_jsonl is not None:
560
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
561
+ if data_args.validation_jsonl is not None:
562
+ data_files["validation"] = data_args.validation_jsonl
563
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
564
+ else:
565
+ if data_args.dataset_name is None:
566
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
567
+ raw = load_dataset(
568
+ data_args.dataset_name,
569
+ data_args.dataset_config_name,
570
+ verification_mode=verification_mode,
571
+ cache_dir=model_args.cache_dir,
572
+ )
573
+ train_ds = raw[data_args.train_split_name]
574
+ eval_ds = None
575
+ if training_args.do_eval:
576
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
577
+ eval_ds = raw[data_args.eval_split_name]
578
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
579
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
580
+ train_ds, eval_ds = split["train"], split["test"]
581
+
582
+ train_dataset = VibeVoiceDataset(
583
+ train_ds,
584
+ text_column=data_args.text_column_name,
585
+ audio_column=data_args.audio_column_name,
586
+ voice_prompts_column=data_args.voice_prompts_column_name,
587
+ )
588
+ eval_dataset = None
589
+ if eval_ds is not None:
590
+ eval_dataset = VibeVoiceDataset(
591
+ eval_ds,
592
+ text_column=data_args.text_column_name,
593
+ audio_column=data_args.audio_column_name,
594
+ voice_prompts_column=data_args.voice_prompts_column_name,
595
+ )
596
+
597
+ # Ratios/dims from processor+model
598
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
599
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
600
+ if semantic_dim is None:
601
+ try:
602
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
603
+ except Exception:
604
+ semantic_dim = 128
605
+
606
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
607
+
608
+ if os.path.exists(preprocessed_file):
609
+ data_collator = PreprocessedBatchCollator()
610
+ else:
611
+ data_collator = VibeVoiceCollator(
612
+ processor=processor,
613
+ max_length=data_args.max_length,
614
+ speech_compress_ratio=speech_compress_ratio,
615
+ semantic_vae_dim=semantic_dim,
616
+ compute_semantics=compute_semantics_flag,
617
+ debug_checks=False,
618
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
619
+ )
620
+
621
class LoRADebugCallback(TrainerCallback):
    """Logs whether LoRA adapter weights are actually being updated.

    At train begin it snapshots the L2 norm of every ``lora_A``/``lora_B``
    parameter; on step 1 and every N-th step it compares current norms against
    the snapshot and reports how many A/B matrices changed and how many B
    matrices are still exactly zero (lora_B is zero-initialized by PEFT, so
    "all B zero forever" means the adapters receive no gradient).
    """

    def __init__(self, log_every_n_steps: int = 50):
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        # param name -> last observed L2 norm
        self.prev_param_norms: Dict[str, float] = {}
        self.lora_param_names: List[str] = []

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        try:
            if model is None:
                return
            # Safe access to the underlying model when wrapped by DDP.
            actual_model = model.module if hasattr(model, "module") else model
            named: Dict[str, torch.nn.Parameter] = dict(actual_model.named_parameters())
            self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
            # Record baseline norms so on_step_end can detect movement.
            for n in self.lora_param_names:
                p = named[n]
                self.prev_param_norms[n] = float(p.data.norm().item())
            total = len(self.lora_param_names)
            req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
            num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            # Diagnostics only — never abort training over a logging failure.
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        try:
            if model is None or len(self.lora_param_names) == 0:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Log on step 1 and then every log_every_n_steps.
            if step % self.log_every_n_steps != 0 and step != 1:
                return

            actual_model = model.module if hasattr(model, "module") else model
            named: Dict[str, torch.nn.Parameter] = dict(actual_model.named_parameters())
            changed_A = 0
            changed_B = 0
            zero_B = 0
            eps = 1e-12
            for n in self.lora_param_names:
                p = named.get(n, None)
                if p is None:
                    continue
                prev = self.prev_param_norms.get(n, 0.0)
                curr = float(p.data.norm().item())
                # A norm delta above eps means the optimizer touched this matrix
                # since the last check (norms are refreshed below).
                if "lora_A" in n and abs(curr - prev) > eps:
                    changed_A += 1
                if "lora_B" in n:
                    if abs(curr - prev) > eps:
                        changed_B += 1
                    if curr == 0.0:
                        zero_B += 1
                self.prev_param_norms[n] = curr
            total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
684
+
685
class VibeVoiceTrainer(Trainer):
    """Trainer computing the joint loss: weighted text cross-entropy plus the
    model's diffusion loss, with save overrides that always dump the full
    diffusion head and connector state_dicts for inference.

    Note: this class closes over the enclosing script's ``training_args``
    (loss weights, ddpm_batch_mul, CE-debug flags) rather than reading them
    from ``self.args``.
    """

    def compute_loss(self, model, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
        # Unwrap the DDP wrapper to reach the model's internal modules
        # (avoids attribute errors when running distributed).
        actual_model = model.module if hasattr(model, "module") else model

        labels = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        acoustic_input_mask = inputs.get("acoustic_input_mask")

        # Ensure semantic tensors exist and have correct dtype/device
        sem = inputs.get("speech_semantic_tensors", None)
        try:
            # Match the semantic connector's parameter dtype (e.g. bf16).
            target_dtype = next(actual_model.model.semantic_connector.parameters()).dtype
        except Exception:
            target_dtype = actual_model.get_input_embeddings().weight.dtype

        if sem is None:
            sm = inputs.get("speech_masks")
            if sm is not None:
                # No semantics provided: substitute zeros shaped like the speech mask.
                zeros = torch.zeros(
                    sm.size(0), sm.size(1),
                    getattr(actual_model.config, "semantic_vae_dim", 128),
                    dtype=target_dtype,
                    device=sm.device,
                )
                inputs["speech_semantic_tensors"] = zeros
        else:
            if isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

        outputs = model(
            input_ids=inputs.get("input_ids"),
            attention_mask=attention_mask,
            speech_tensors=inputs.get("speech_tensors"),
            speech_masks=inputs.get("speech_masks"),
            speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
            acoustic_input_mask=acoustic_input_mask,
            acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
            speeches_loss_input=inputs.get("speeches_loss_input"),
            ddpm_batch_mul=training_args.ddpm_batch_mul,
        )

        # Invariants: token/latent selection equality across views (warn, don't assert)
        try:
            al_mask = inputs.get("acoustic_loss_mask")
            sp_masks = inputs.get("speech_masks")
            sp_loss_sel = inputs.get("speeches_loss_input")
            num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
            num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
            num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
            num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
            self.log({
                "debug/num_tok_total": float(num_tok_total),
                "debug/num_tok_loss": float(num_tok_loss),
                "debug/num_lat_total": float(num_lat_total),
                "debug/num_lat_loss": float(num_lat_loss),
            })
            if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                if num_tok_loss != num_lat_loss:
                    # The number of acoustic token positions selected for loss should
                    # match the number of latents selected for loss.
                    logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
        except Exception:
            pass

        # CE Loss over text tokens only (padding + acoustic positions masked out).
        logits = outputs.logits
        ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
        # Shift logits to align logits[t] with label[t+1] (next-token prediction).
        shift_logits = logits[:, :-1, :].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
        ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

        # Optional CE diagnostics
        try:
            self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
        except Exception as e:
            logger.warning(f"Failed invoking CE debug: {e}")

        # Diffusion loss (0.0 when the model produced none for this batch).
        diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
        total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

        # Logs
        try:
            prefix = "train" if model.training else "eval"
            self.log({
                f"{prefix}/ce_loss": ce_loss.detach().item(),
                f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
            })
            # Log the actual optimizer LR (first param group) alongside the scheduler value.
            if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                lr_val = self.optimizer.param_groups[0].get("lr", None)
                if lr_val is not None:
                    self.log({"train/learning_rate_real": float(lr_val)})
        except Exception:
            pass

        return (total, outputs) if return_outputs else total

    def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
        """Periodically log per-token CE statistics (gated by debug_ce_details)."""
        try:
            if not getattr(training_args, "debug_ce_details", False):
                return
            step = int(getattr(self.state, "global_step", 0) or 0)
            every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
            # Log on the first steps and then every every_n steps.
            if not (step <= 1 or (step % every_n == 0)):
                return

            with torch.no_grad():
                vocab = shift_logits.size(-1)
                # Unreduced CE so we can aggregate per example below.
                per_token_loss = F.cross_entropy(
                    shift_logits.view(-1, vocab),
                    ce_labels.view(-1),
                    reduction="none",
                    ignore_index=-100,
                ).view_as(ce_labels)

                valid_mask = ce_labels.ne(-100)
                num_valid = int(valid_mask.sum().item())
                avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                per_ex_avgs = []
                max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                B = ce_labels.size(0)
                for b in range(min(B, max_examples)):
                    vb = valid_mask[b]
                    if int(vb.sum().item()) > 0:
                        per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                    else:
                        per_ex_avgs.append(float("nan"))
                # x == x is False only for NaN: render NaN averages as None.
                logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
        except Exception as e:
            logger.warning(f"CE detailed debug failed: {e}")

    # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------

    def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
        # Only the main process writes files, to avoid clashes and file
        # corruption under DDP.
        if not self.is_world_process_zero():
            return

        try:
            actual_model = self.model.module if hasattr(self.model, "module") else self.model
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)

            # --- LLM PEFT adapters (if LoRA-wrapped) ---
            language_model = getattr(actual_model.model, "language_model", None)
            if hasattr(language_model, "save_pretrained"):
                language_model.save_pretrained(lora_out)

            # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
            pred_head = getattr(actual_model.model, "prediction_head", None)
            if hasattr(pred_head, "save_pretrained"):
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                pred_head.save_pretrained(ph_dir)

            # --- ALWAYS save FULL diffusion head state_dict for fallback ---
            # Written twice (lora/ and lora/diffusion_head/) so either layout
            # works at inference time.
            if pred_head is not None and hasattr(pred_head, "state_dict"):
                sd = pred_head.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

            # --- Connectors (plain state_dicts) ---
            ac = getattr(actual_model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

            se = getattr(actual_model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

        except Exception as e:
            logger.warning(f"Failed to save LoRA assets: {e}")
864
+
865
+
866
+ # ------------- Build the Trainer -------------
867
+
868
+ # Resolve which adapters to apply in samples
869
+ # توجه: دستگاه به صورت خودکار در Callback مدیریت می‌شود
870
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999)
871
+
872
+ # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
873
+ # This prevents 'ValueError: Attempting to unscale FP16 gradients'
874
+ if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
875
+ if training_args.local_rank in [-1, 0]:
876
+ print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
877
+ # در حالت DDP ممکن است مدل رپ شده باشد، پس بهتر است actual_model را چک کنیم
878
+ actual_model = model.module if hasattr(model, "module") else model
879
+ for name, param in actual_model.named_parameters():
880
+ if param.requires_grad:
881
+ param.data = param.data.to(torch.float32)
882
+ # ---------------------------------------------------
883
+
884
+ trainer = VibeVoiceTrainer(
885
+ model=model,
886
+ args=training_args,
887
+ train_dataset=train_dataset,
888
+ eval_dataset=eval_dataset,
889
+ data_collator=data_collator,
890
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
891
+ )
892
+
893
+ # Optional debug pre-training save
894
+ if getattr(training_args, "debug_save", False):
895
+ if trainer.is_world_process_zero():
896
+ try:
897
+ actual_model = model.module if hasattr(model, "module") else model
898
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
899
+ lora_out = os.path.join(debug_dir, "lora")
900
+ os.makedirs(lora_out, exist_ok=True)
901
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
902
+ # language model adapters / base
903
+ try:
904
+ if hasattr(actual_model.model.language_model, "save_pretrained"):
905
+ actual_model.model.language_model.save_pretrained(lora_out)
906
+ except Exception as e_lm:
907
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
908
+ # diffusion head
909
+ try:
910
+ if hasattr(actual_model.model, "prediction_head") and hasattr(actual_model.model.prediction_head, "save_pretrained"):
911
+ actual_model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
912
+ except Exception as e_head:
913
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
914
+ # NEW: full diffusion head state_dict as fallback
915
+ try:
916
+ ph = getattr(actual_model.model, "prediction_head", None)
917
+ if ph is not None and hasattr(ph, "state_dict"):
918
+ sd = ph.state_dict()
919
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
920
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
921
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
922
+ except Exception as e:
923
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
924
+ # connectors
925
+ try:
926
+ ac_conn = getattr(actual_model.model, "acoustic_connector", None)
927
+ if ac_conn is not None:
928
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
929
+ os.makedirs(ac_dir, exist_ok=True)
930
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
931
+ except Exception as e_ac:
932
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
933
+ try:
934
+ se_conn = getattr(actual_model.model, "semantic_connector", None)
935
+ if se_conn is not None:
936
+ se_dir = os.path.join(lora_out, "semantic_connector")
937
+ os.makedirs(se_dir, exist_ok=True)
938
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
939
+ except Exception as e_se:
940
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
941
+ except Exception as e:
942
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
943
+
944
+ if getattr(training_args, "gradient_checkpointing", False):
945
+ try:
946
+ model.gradient_checkpointing_enable()
947
+ except Exception:
948
+ logger.warning("Failed to enable gradient checkpointing on the model.")
949
+
950
+ if training_args.do_train:
951
+ trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
952
+
953
+ if trainer.is_world_process_zero():
954
+ actual_model = model.module if hasattr(model, "module") else model
955
+ lora_out = os.path.join(training_args.output_dir, "lora")
956
+ os.makedirs(lora_out, exist_ok=True)
957
+
958
+ # LLM PEFT (if any)
959
+ lm = getattr(actual_model.model, "language_model", None)
960
+ if hasattr(lm, "save_pretrained"):
961
+ lm.save_pretrained(lora_out)
962
+
963
+ # Diffusion head PEFT (if any)
964
+ ph = getattr(actual_model.model, "prediction_head", None)
965
+ if hasattr(ph, "save_pretrained"):
966
+ ph_dir = os.path.join(lora_out, "diffusion_head")
967
+ os.makedirs(ph_dir, exist_ok=True)
968
+ ph.save_pretrained(ph_dir)
969
+
970
+ # ALWAYS: full diffusion head state_dict fallback
971
+ try:
972
+ if ph is not None and hasattr(ph, "state_dict"):
973
+ sd = ph.state_dict()
974
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
975
+ ph_dir = os.path.join(lora_out, "diffusion_head")
976
+ os.makedirs(ph_dir, exist_ok=True)
977
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
978
+ except Exception as e:
979
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
980
+
981
+ # Connectors (if trained)
982
+ try:
983
+ ac = getattr(actual_model.model, "acoustic_connector", None)
984
+ if ac is not None:
985
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
986
+ os.makedirs(ac_dir, exist_ok=True)
987
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
988
+ except Exception as e:
989
+ logger.warning(f"Failed to save acoustic_connector: {e}")
990
+
991
+ try:
992
+ se = getattr(actual_model.model, "semantic_connector", None)
993
+ if se is not None:
994
+ se_dir = os.path.join(lora_out, "semantic_connector")
995
+ os.makedirs(se_dir, exist_ok=True)
996
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
997
+ except Exception as e:
998
+ logger.warning(f"Failed to save semantic_connector: {e}")
999
+
1000
+ if training_args.do_eval and eval_dataset is not None:
1001
+ trainer.evaluate()
1002
+
1003
+
1004
# Script entry point.
if __name__ == "__main__":
    main()
VibeVoice-finetuning/src/finetune_vibevoice_lora10.py ADDED
@@ -0,0 +1,1044 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import os
3
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
4
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
5
+
6
+ import logging
7
+ import os
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from datasets import load_dataset, DatasetDict, VerificationMode
15
+
16
+ from transformers import (
17
+ HfArgumentParser,
18
+ Trainer,
19
+ set_seed,
20
+ TrainerCallback,
21
+ )
22
+ from transformers import TrainingArguments as HfTrainingArguments
23
+
24
+ from peft import LoraConfig, get_peft_model, TaskType
25
+
26
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
27
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
28
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
29
+
30
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ # ================== SAMPLE CALLBACK UTILS ==================
35
+
36
+ import copy
37
+ import torch
38
+ from transformers import TrainerCallback
39
+
40
class EmaCallback(TrainerCallback):
    """Maintains an exponential moving average (EMA) of one sub-module's weights.

    A shadow copy of the module at ``attr_path`` is updated after every
    optimizer step; the EMA weights are swapped in around evaluation and
    checkpoint saving, and persisted at the end of training.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        device: where the shadow copy lives (default CUDA; pass "cpu" for CPU-only runs)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None  # EMA state_dict, kept on self.device
        self._orig = None  # store non-EMA weights when we swap

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Initialize the shadow as an exact copy of the current weights.
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None: return
        head = self._get_module(model)
        with torch.no_grad():
            # shadow <- decay * shadow + (1 - decay) * current
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None: return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # use EMA during eval
        # NOTE(review): in transformers, on_evaluate fires AFTER metrics are
        # computed, so this swap may not affect the evaluation itself —
        # confirm against the installed transformers version.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # NOTE(review): "on_evaluate_end" is not a standard TrainerCallback
        # hook in most transformers releases; if it is never invoked, the EMA
        # weights swapped in by on_evaluate remain active — verify.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # temporarily swap to EMA, let Trainer save, then swap back
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # NOTE(review): same caveat as on_evaluate_end — this hook name may
        # never be called by the Trainer, leaving EMA weights active after a
        # checkpoint save.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)
100
+
101
+
102
@dataclass
class ModelArguments:
    """CLI arguments selecting the base model/processor and which parts to train."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    # HF cache directory used for model/dataset downloads.
    cache_dir: Optional[str] = field(default=None)
    # Keep the pretrained audio tokenizers frozen by default.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # LoRA hyperparameters (rank, scaling alpha, dropout) shared by LLM and head adapters.
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
127
+
128
@dataclass
class DataArguments:
    """CLI arguments describing the dataset source, column names and splitting."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names expected in each dataset row.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of train data carved out for eval when no eval split is available.
    eval_split_size: float = field(default=0.0)
    # Skip HF datasets integrity checks when loading.
    ignore_verifications: bool = field(default=False)
    # Max sequence length forwarded to the collator (None = collator default).
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
146
+
147
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice loss weights and debug flags."""

    # Forwarded to the model forward call as ddpm_batch_mul (diffusion batch multiplier).
    ddpm_batch_mul: int = field(default=1)
    # Relative weights combining text CE loss and diffusion loss into the total loss.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Detailed per-token CE diagnostics (consumed by VibeVoiceTrainer._debug_ce).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
164
+
165
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Translate CLI LoRA options into a peft ``LoraConfig`` for the language model.

    The comma-separated ``--lora_target_modules`` string is split into module
    names; empty entries (e.g. from trailing commas) are discarded.
    """
    modules = []
    for piece in args.lora_target_modules.split(","):
        piece = piece.strip()
        if piece:
            modules.append(piece)
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
    )
175
+
176
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the peft ``LoraConfig`` used to wrap the diffusion prediction head.

    Targets the head's fixed set of projection layers; rank, alpha, and dropout
    are shared with the LLM LoRA settings from the CLI.
    """
    head_modules = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_modules,
    )
186
+
187
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Build next-token cross-entropy targets from ``labels``.

    Shifts ``labels`` left by one position, then replaces with ``pad_id`` any
    target that is either outside the attention mask or marked as an acoustic
    token (acoustic positions are trained via the diffusion loss instead).
    When ``attention_mask`` is None/empty, every shifted position is kept.
    """
    targets = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        keep = attention_mask[:, 1:].contiguous().eq(1)
    else:
        keep = torch.ones_like(targets, dtype=torch.bool)
    acoustic_targets = acoustic_input_mask[:, 1:].contiguous()
    keep = keep & ~acoustic_targets
    masked = targets.clone()
    masked[~keep] = pad_id
    return masked
195
+
196
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
    # Wrap acoustic_tokenizer.encode() so its result can always be indexed as
    # out[0][0] (the "legacy" access pattern), regardless of whether the
    # installed tokenizer returns a nested list, a dict, a namespace-style
    # object, or a bare tensor. Best-effort: failures only log a warning.
    try:
        # The tokenizer may hang off model_obj.model or off model_obj directly.
        acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
        if acoustic is None or not hasattr(acoustic, "encode"):
            logger_.warning("No acoustic_tokenizer.encode() found to patch.")
            return
        base_encode = acoustic.encode
        def encode_wrapped(*args, **kwargs):
            out = base_encode(*args, **kwargs)
            try:
                # Output already supports double indexing: pass through as-is.
                _ = out[0][0]
                return out
            except Exception:
                pass
            if isinstance(out, dict):
                # Prefer well-known payload keys; otherwise fall back to the
                # dict's first value (insertion order).
                for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
                    if k in out:
                        return [[out[k]]]
                if len(out) > 0:
                    return [[next(iter(out.values()))]]
            # Namespace/ModelOutput-style results: probe known attributes.
            for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
                if hasattr(out, attr):
                    return [[getattr(out, attr)]]
            try:
                # Bare tensor (that failed out[0][0] above, e.g. 0-d/1-d): wrap it.
                if isinstance(out, torch.Tensor):
                    return [[out]]
            except Exception:
                pass
            # Last resort: wrap whatever we got so out[0][0] still works.
            return [[out]]
        acoustic.encode = encode_wrapped
        logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
    except Exception as e:
        logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
229
+
230
+ def main() -> None:
231
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
232
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
233
+
234
+ logging.basicConfig(
235
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
236
+ datefmt="%m/%d/%Y %H:%M:%S",
237
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
238
+ )
239
+ logger.info("Training/evaluation parameters %s", training_args)
240
+ set_seed(training_args.seed)
241
+
242
+ # Configure gradient clipping
243
+ if not getattr(training_args, "gradient_clipping", False):
244
+ if hasattr(training_args, "max_grad_norm"):
245
+ training_args.max_grad_norm = 0.0
246
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
247
+ else:
248
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
249
+ training_args.max_grad_norm = 1.0
250
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
251
+
252
+ # Load processor
253
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
254
+ if processor_path is None:
255
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
256
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
257
+
258
+ # Required special tokens
259
+ tok = processor.tokenizer
260
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
261
+ if not hasattr(tok, required) or getattr(tok, required) is None:
262
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
263
+
264
+ # Load model
265
+ if model_args.model_name_or_path is None:
266
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
267
+ dtype = torch.float32
268
+ if training_args.bf16:
269
+ dtype = torch.bfloat16
270
+ elif getattr(training_args, "fp16", False):
271
+ dtype = torch.float16
272
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
273
+ model_args.model_name_or_path,
274
+ torch_dtype=dtype, device_map={"": 0},
275
+ )
276
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
277
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
278
+
279
+ # Diagnostics: LM head tie
280
+ try:
281
+ in_emb_mod = model.get_input_embeddings()
282
+ out_emb_mod = model.get_output_embeddings()
283
+ in_w = getattr(in_emb_mod, "weight", None)
284
+ out_w = getattr(out_emb_mod, "weight", None)
285
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
286
+ values_equal = False
287
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
288
+ try:
289
+ values_equal = bool(torch.allclose(in_w, out_w))
290
+ except Exception:
291
+ values_equal = False
292
+ try:
293
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
294
+ except Exception:
295
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
296
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
297
+ if out_w is not None:
298
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
299
+ except Exception as e:
300
+ logger.warning(f"LM head tie diagnostics failed: {e}")
301
+
302
+ # Hard-tie LM head
303
+ try:
304
+ emb_module = model.get_input_embeddings()
305
+ head_module = model.get_output_embeddings()
306
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
307
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
308
+ with torch.no_grad():
309
+ head_module.weight = emb_module.weight
310
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
311
+ except Exception as e:
312
+ logger.warning(f"Force-tie of LM head failed: {e}")
313
+
314
+ # Validate special IDs (info logs only)
315
+ try:
316
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
317
+ try:
318
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
319
+ except Exception:
320
+ vocab_size = 0
321
+ in_emb_mod = model.get_input_embeddings()
322
+ out_emb_mod = model.get_output_embeddings()
323
+ in_w = getattr(in_emb_mod, "weight", None)
324
+ out_w = getattr(out_emb_mod, "weight", None)
325
+ for name in special_names:
326
+ val = getattr(tok, name, None)
327
+ exists = (val is not None)
328
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
329
+ equal_row = None
330
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
331
+ try:
332
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
333
+ except Exception:
334
+ equal_row = False
335
+ decoded_str = None
336
+ if exists and isinstance(val, int):
337
+ try:
338
+ decoded_str = tok.decode([val])
339
+ except Exception:
340
+ try:
341
+ decoded_str = tok.convert_ids_to_tokens(val)
342
+ except Exception:
343
+ decoded_str = "<decode_failed>"
344
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
345
+ except Exception as e:
346
+ logger.warning(f"Special token ID/row validation failed: {e}")
347
+
348
+ # Quick tokenizer diagnostics (optional)
349
+ try:
350
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
351
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
352
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
353
+ # tiny CE smoke test
354
+ with torch.no_grad():
355
+ simple_text = "The cat sat on the mat."
356
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
357
+ simple_mask = torch.ones_like(simple_ids)
358
+ x = model.get_input_embeddings()(simple_ids)
359
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
360
+ logits = model.lm_head(outputs.last_hidden_state)
361
+ shift_logits = logits[:, :-1, :].contiguous()
362
+ shift_labels = simple_ids[:, 1:].contiguous()
363
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
364
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
365
+ except Exception as e:
366
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
367
+
368
+ # Disable cache during training
369
+ if hasattr(model.config, "use_cache") and training_args.do_train:
370
+ model.config.use_cache = False
371
+
372
+ # Freeze tokenizers
373
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
374
+ for p in model.model.acoustic_tokenizer.parameters():
375
+ p.requires_grad = False
376
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
377
+ for p in model.model.semantic_tokenizer.parameters():
378
+ p.requires_grad = False
379
+
380
+ # LoRA wrap LLM (optional)
381
+ lora_cfg = build_lora_config(model_args)
382
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
383
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
384
+ if not skip_lm_lora:
385
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
386
+ else:
387
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
388
+
389
+ try:
390
+ model.tie_weights()
391
+ except Exception:
392
+ pass
393
+
394
+ # Freeze all then enable trainable subsets
395
+ for _, p in model.named_parameters():
396
+ p.requires_grad = False
397
+
398
+ try:
399
+ for n, p in model.model.language_model.named_parameters():
400
+ if "lora_A" in n or "lora_B" in n:
401
+ p.requires_grad = True
402
+ except Exception:
403
+ logger.warning("Could not re-enable LoRA params on language_model.")
404
+
405
+ # Diffusion head LoRA wrapping (optional)
406
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
407
+ class _HeadForwardShim(nn.Module):
408
+ def __init__(self, base: nn.Module): super().__init__(); self.base = base
409
+ def forward(self, *args, **kwargs):
410
+ if len(args) >= 3:
411
+ noisy_images, timesteps, condition = args[:3]
412
+ else:
413
+ noisy_images = kwargs.get("noisy_images")
414
+ timesteps = kwargs.get("timesteps")
415
+ condition = kwargs.get("condition")
416
+ return self.base(noisy_images, timesteps, condition)
417
+ try:
418
+ shim = _HeadForwardShim(model.model.prediction_head)
419
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
420
+ for n, p in model.model.prediction_head.named_parameters():
421
+ if "lora_A" in n or "lora_B" in n:
422
+ p.requires_grad = True
423
+ except Exception as e:
424
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
425
+
426
+ # Train full diffusion head (optional)
427
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
428
+ for p in model.model.prediction_head.parameters():
429
+ p.requires_grad = True
430
+
431
+ # Freeze diffusion head layers (optional)
432
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
433
+ head_params = list(model.model.prediction_head.named_parameters())
434
+ try:
435
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
436
+ frozen_count = 0
437
+ for i, (name, param) in enumerate(head_params):
438
+ if i in indices_to_freeze:
439
+ param.requires_grad = False
440
+ frozen_count += 1
441
+ logger.info(f"Froze layer [{i}]: {name}")
442
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
443
+ except Exception as e:
444
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
445
+ raise
446
+
447
+ # Connectors
448
+ if getattr(model_args, "train_connectors", False):
449
+ if hasattr(model.model, "acoustic_connector"):
450
+ for p in model.model.acoustic_connector.parameters():
451
+ p.requires_grad = True
452
+ if hasattr(model.model, "semantic_connector"):
453
+ for p in model.model.semantic_connector.parameters():
454
+ p.requires_grad = True
455
+ else:
456
+ if hasattr(model.model, "acoustic_connector"):
457
+ for p in model.model.acoustic_connector.parameters():
458
+ p.requires_grad = False
459
+ if hasattr(model.model, "semantic_connector"):
460
+ for p in model.model.semantic_connector.parameters():
461
+ p.requires_grad = False
462
+
463
+ # Freeze embedding + head
464
+ try:
465
+ emb = model.get_input_embeddings()
466
+ if hasattr(emb, "weight"):
467
+ emb.weight.requires_grad_(False)
468
+ head = model.get_output_embeddings()
469
+ if head is not None and hasattr(head, "weight"):
470
+ head.weight.requires_grad_(False)
471
+ except Exception:
472
+ pass
473
+
474
+ # Diagnostics
475
+ def _sum_params(named_iter):
476
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
477
+ try:
478
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
479
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
480
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
481
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
482
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
483
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
484
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
485
+ except Exception:
486
+ pass
487
+
488
+ # Preprocessed data classes
489
+ class PreprocessedBatchDataset:
490
+ def __init__(self, preprocessed_file: str):
491
+ self.data = torch.load(preprocessed_file, map_location='cpu')
492
+ logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")
493
+
494
+ def __len__(self):
495
+ return len(self.data)
496
+
497
+ def __getitem__(self, idx):
498
+ batch = self.data[idx]
499
+ result = {}
500
+ for k, v in batch.items():
501
+ if isinstance(v, torch.Tensor):
502
+ result[k] = v
503
+ else:
504
+ result[k] = v
505
+ return result
506
+
507
+ class PreprocessedBatchSubset:
508
+ def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
509
+ self.dataset = dataset
510
+ self.indices = indices
511
+
512
+ def __len__(self):
513
+ return len(self.indices)
514
+
515
+ def __getitem__(self, idx):
516
+ actual_idx = self.indices[idx]
517
+ return self.dataset[actual_idx]
518
+
519
+ class PreprocessedBatchCollator:
520
+ def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
521
+ if not batch:
522
+ return {}
523
+ result = {}
524
+ for key in batch[0].keys():
525
+ tensors = [b[key] for b in batch if b[key] is not None]
526
+ if tensors and isinstance(tensors[0], torch.Tensor):
527
+ result[key] = torch.cat(tensors, dim=0)
528
+ else:
529
+ result[key] = tensors[0] if tensors else None
530
+ return result
531
+
532
+ # Datasets
533
+ preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
534
+ preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
535
+
536
+ if os.path.exists(preprocessed_file):
537
+ logger.info(f"Loading preprocessed data from {preprocessed_file}")
538
+ preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
539
+
540
+ train_dataset = preprocessed_data
541
+ eval_dataset = None
542
+
543
+ if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
544
+ num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
545
+ num_train = len(preprocessed_data) - num_eval
546
+ indices = list(range(len(preprocessed_data)))
547
+ import random
548
+ random.Random(training_args.seed).shuffle(indices)
549
+ train_indices = indices[:num_train]
550
+ eval_indices = indices[num_train:]
551
+ train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
552
+ eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
553
+ else:
554
+ logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
555
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
556
+ if data_args.train_jsonl is not None:
557
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
558
+ if data_args.validation_jsonl is not None:
559
+ data_files["validation"] = data_args.validation_jsonl
560
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
561
+ else:
562
+ if data_args.dataset_name is None:
563
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
564
+ raw = load_dataset(
565
+ data_args.dataset_name,
566
+ data_args.dataset_config_name,
567
+ verification_mode=verification_mode,
568
+ cache_dir=model_args.cache_dir,
569
+ )
570
+ train_ds = raw[data_args.train_split_name]
571
+ eval_ds = None
572
+ if training_args.do_eval:
573
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
574
+ eval_ds = raw[data_args.eval_split_name]
575
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
576
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
577
+ train_ds, eval_ds = split["train"], split["test"]
578
+
579
+ train_dataset = VibeVoiceDataset(
580
+ train_ds,
581
+ text_column=data_args.text_column_name,
582
+ audio_column=data_args.audio_column_name,
583
+ voice_prompts_column=data_args.voice_prompts_column_name,
584
+ )
585
+ eval_dataset = None
586
+ if eval_ds is not None:
587
+ eval_dataset = VibeVoiceDataset(
588
+ eval_ds,
589
+ text_column=data_args.text_column_name,
590
+ audio_column=data_args.audio_column_name,
591
+ voice_prompts_column=data_args.voice_prompts_column_name,
592
+ )
593
+
594
+ # Ratios/dims from processor+model
595
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
596
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
597
+ if semantic_dim is None:
598
+ try:
599
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
600
+ except Exception:
601
+ semantic_dim = 128
602
+
603
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
604
+
605
+ if os.path.exists(preprocessed_file):
606
+ data_collator = PreprocessedBatchCollator()
607
+ else:
608
+ data_collator = VibeVoiceCollator(
609
+ processor=processor,
610
+ max_length=data_args.max_length,
611
+ speech_compress_ratio=speech_compress_ratio,
612
+ semantic_vae_dim=semantic_dim,
613
+ compute_semantics=compute_semantics_flag,
614
+ debug_checks=False,
615
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
616
+ )
617
+
618
+ class LoRADebugCallback(TrainerCallback):
619
+ def __init__(self, log_every_n_steps: int = 50):
620
+ self.log_every_n_steps = max(1, int(log_every_n_steps))
621
+ self.prev_param_norms: Dict[str, float] = {}
622
+ self.lora_param_names: List[str] = []
623
+
624
+ def on_train_begin(self, args, state, control, model=None, **kwargs):
625
+ try:
626
+ if model is None:
627
+ return
628
+ named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
629
+ self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
630
+ for n in self.lora_param_names:
631
+ p = named[n]
632
+ self.prev_param_norms[n] = float(p.data.norm().item())
633
+ total = len(self.lora_param_names)
634
+ req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
635
+ num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
636
+ num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
637
+ zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
638
+ logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
639
+ if total == 0:
640
+ logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
641
+ if req_grad != total:
642
+ logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
643
+ except Exception as e:
644
+ logger.warning(f"LoRA debug (on_train_begin) failed: {e}")
645
+
646
+ def on_step_end(self, args, state, control, model=None, **kwargs):
647
+ try:
648
+ if model is None or len(self.lora_param_names) == 0:
649
+ return
650
+ step = int(getattr(state, "global_step", 0) or 0)
651
+ if step % self.log_every_n_steps != 0 and step != 1:
652
+ return
653
+ named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
654
+ changed_A = 0
655
+ changed_B = 0
656
+ zero_B = 0
657
+ eps = 1e-12
658
+ for n in self.lora_param_names:
659
+ p = named.get(n, None)
660
+ if p is None:
661
+ continue
662
+ prev = self.prev_param_norms.get(n, 0.0)
663
+ curr = float(p.data.norm().item())
664
+ if "lora_A" in n and abs(curr - prev) > eps:
665
+ changed_A += 1
666
+ if "lora_B" in n:
667
+ if abs(curr - prev) > eps:
668
+ changed_B += 1
669
+ if curr == 0.0:
670
+ zero_B += 1
671
+ self.prev_param_norms[n] = curr
672
+ total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
673
+ total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
674
+ logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
675
+ except Exception as e:
676
+ logger.warning(f"LoRA debug (on_step_end) failed: {e}")
677
+
678
+ class VibeVoiceTrainer(Trainer):
679
+ def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
680
+ labels = inputs.get("input_ids")
681
+ attention_mask = inputs.get("attention_mask")
682
+ acoustic_input_mask = inputs.get("acoustic_input_mask")
683
+
684
+ # Ensure semantic tensors exist and have correct dtype/device
685
+ sem = inputs.get("speech_semantic_tensors", None)
686
+ try:
687
+ target_dtype = next(model.model.semantic_connector.parameters()).dtype
688
+ except Exception:
689
+ target_dtype = model.get_input_embeddings().weight.dtype
690
+
691
+ if sem is None:
692
+ sm = inputs.get("speech_masks")
693
+ if sm is not None:
694
+ zeros = torch.zeros(
695
+ sm.size(0), sm.size(1),
696
+ getattr(model.config, "semantic_vae_dim", 128),
697
+ dtype=target_dtype,
698
+ device=sm.device,
699
+ )
700
+ inputs["speech_semantic_tensors"] = zeros
701
+ else:
702
+ if isinstance(sem, torch.Tensor):
703
+ inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)
704
+
705
+ outputs = model(
706
+ input_ids=inputs.get("input_ids"),
707
+ attention_mask=attention_mask,
708
+ speech_tensors=inputs.get("speech_tensors"),
709
+ speech_masks=inputs.get("speech_masks"),
710
+ speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
711
+ acoustic_input_mask=acoustic_input_mask,
712
+ acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
713
+ speeches_loss_input=inputs.get("speeches_loss_input"),
714
+ ddpm_batch_mul=training_args.ddpm_batch_mul,
715
+ )
716
+
717
+ # Invariants: token/latent selection equality across views (warn, don't assert)
718
+ try:
719
+ al_mask = inputs.get("acoustic_loss_mask")
720
+ sp_masks = inputs.get("speech_masks")
721
+ sp_loss_sel = inputs.get("speeches_loss_input")
722
+ num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
723
+ num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
724
+ num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
725
+ num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
726
+ self.log({
727
+ "debug/num_tok_total": float(num_tok_total),
728
+ "debug/num_tok_loss": float(num_tok_loss),
729
+ "debug/num_lat_total": float(num_lat_total),
730
+ "debug/num_lat_loss": float(num_lat_loss),
731
+ })
732
+ if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
733
+ if num_tok_loss != num_lat_loss:
734
+ logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
735
+ except Exception:
736
+ pass
737
+
738
+ # CE Loss
739
+ logits = outputs.logits
740
+ ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
741
+ shift_logits = logits[:, :-1, :].contiguous()
742
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
743
+ ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))
744
+
745
+ # Optional CE diagnostics
746
+ try:
747
+ self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
748
+ except Exception as e:
749
+ logger.warning(f"Failed invoking CE debug: {e}")
750
+
751
+ # Diffusion loss
752
+ diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
753
+ total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss
754
+
755
+ # Logs
756
+ try:
757
+ prefix = "train" if model.training else "eval"
758
+ self.log({
759
+ f"{prefix}/ce_loss": ce_loss.detach().item(),
760
+ f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
761
+ })
762
+ if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
763
+ lr_val = self.optimizer.param_groups[0].get("lr", None)
764
+ if lr_val is not None:
765
+ self.log({"train/learning_rate_real": float(lr_val)})
766
+ except Exception:
767
+ pass
768
+
769
+ return (total, outputs) if return_outputs else total
770
+
771
+ def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
772
+ try:
773
+ if not getattr(training_args, "debug_ce_details", False):
774
+ return
775
+ step = int(getattr(self.state, "global_step", 0) or 0)
776
+ every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
777
+ if not (step <= 1 or (step % every_n == 0)):
778
+ return
779
+
780
+ with torch.no_grad():
781
+ vocab = shift_logits.size(-1)
782
+ per_token_loss = F.cross_entropy(
783
+ shift_logits.view(-1, vocab),
784
+ ce_labels.view(-1),
785
+ reduction="none",
786
+ ignore_index=-100,
787
+ ).view_as(ce_labels)
788
+
789
+ valid_mask = ce_labels.ne(-100)
790
+ num_valid = int(valid_mask.sum().item())
791
+ avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")
792
+
793
+ per_ex_avgs = []
794
+ max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
795
+ B = ce_labels.size(0)
796
+ for b in range(min(B, max_examples)):
797
+ vb = valid_mask[b]
798
+ if int(vb.sum().item()) > 0:
799
+ per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
800
+ else:
801
+ per_ex_avgs.append(float("nan"))
802
+ logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
803
+ except Exception as e:
804
+ logger.warning(f"CE detailed debug failed: {e}")
805
+
806
+ # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------
807
+
808
+
809
+ def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
810
+ try:
811
+ target_dir = output_dir or self.args.output_dir
812
+ lora_out = os.path.join(target_dir, "lora")
813
+ os.makedirs(lora_out, exist_ok=True)
814
+
815
+ # --- LLM PEFT adapters (if LoRA-wrapped) ---
816
+ language_model = getattr(self.model.model, "language_model", None)
817
+ if hasattr(language_model, "save_pretrained"):
818
+ language_model.save_pretrained(lora_out)
819
+
820
+ # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
821
+ pred_head = getattr(self.model.model, "prediction_head", None)
822
+ if hasattr(pred_head, "save_pretrained"):
823
+ ph_dir = os.path.join(lora_out, "diffusion_head")
824
+ os.makedirs(ph_dir, exist_ok=True)
825
+ pred_head.save_pretrained(ph_dir)
826
+
827
+ # --- ALWAYS save FULL diffusion head state_dict for fallback ---
828
+ if pred_head is not None and hasattr(pred_head, "state_dict"):
829
+ sd = pred_head.state_dict()
830
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
831
+ ph_dir = os.path.join(lora_out, "diffusion_head")
832
+ os.makedirs(ph_dir, exist_ok=True)
833
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
834
+
835
+ # --- Connectors (plain state_dicts) ---
836
+ ac = getattr(self.model.model, "acoustic_connector", None)
837
+ if ac is not None:
838
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
839
+ os.makedirs(ac_dir, exist_ok=True)
840
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
841
+
842
+ se = getattr(self.model.model, "semantic_connector", None)
843
+ if se is not None:
844
+ se_dir = os.path.join(lora_out, "semantic_connector")
845
+ os.makedirs(se_dir, exist_ok=True)
846
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
847
+
848
+ except Exception as e:
849
+ logger.warning(f"Failed to save LoRA assets: {e}")
850
+
851
+
852
+ # ------------- Build the Trainer -------------
853
+
854
+ # Resolve which adapters to apply in samples
855
+
856
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
857
+
858
+ # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
859
+ # This prevents 'ValueError: Attempting to unscale FP16 gradients'
860
+ if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
861
+ print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
862
+ for name, param in model.named_parameters():
863
+ if param.requires_grad:
864
+ param.data = param.data.to(torch.float32)
865
+ # ---------------------------------------------------
866
+
867
+ trainer = VibeVoiceTrainer(
868
+ model=model,
869
+ args=training_args,
870
+ train_dataset=train_dataset,
871
+ eval_dataset=eval_dataset,
872
+ data_collator=data_collator,
873
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
874
+ )
875
+
876
+ # Optional debug pre-training save
877
+ if getattr(training_args, "debug_save", False):
878
+ try:
879
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
880
+ lora_out = os.path.join(debug_dir, "lora")
881
+ os.makedirs(lora_out, exist_ok=True)
882
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
883
+ # language model adapters / base
884
+ try:
885
+ if hasattr(model.model.language_model, "save_pretrained"):
886
+ model.model.language_model.save_pretrained(lora_out)
887
+ except Exception as e_lm:
888
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
889
+ # diffusion head
890
+ try:
891
+ if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
892
+ model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
893
+ except Exception as e_head:
894
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
895
+ # NEW: full diffusion head state_dict as fallback
896
+ try:
897
+ ph = getattr(model.model, "prediction_head", None)
898
+ if ph is not None and hasattr(ph, "state_dict"):
899
+ sd = ph.state_dict()
900
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
901
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
902
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
903
+ except Exception as e:
904
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
905
+ # connectors
906
+ try:
907
+ ac_conn = getattr(model.model, "acoustic_connector", None)
908
+ if ac_conn is not None:
909
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
910
+ os.makedirs(ac_dir, exist_ok=True)
911
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
912
+ except Exception as e_ac:
913
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
914
+ try:
915
+ se_conn = getattr(model.model, "semantic_connector", None)
916
+ if se_conn is not None:
917
+ se_dir = os.path.join(lora_out, "semantic_connector")
918
+ os.makedirs(se_dir, exist_ok=True)
919
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
920
+ except Exception as e_se:
921
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
922
+ except Exception as e:
923
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
924
+
925
+ if getattr(training_args, "gradient_checkpointing", False):
926
+ try:
927
+ model.gradient_checkpointing_enable()
928
+ except Exception:
929
+ logger.warning("Failed to enable gradient checkpointing on the model.")
930
+
931
+ # =========================================================================
932
+ # Load Custom Weights from Checkpoint before resuming training
933
+ # =========================================================================
934
+ if training_args.do_train and training_args.resume_from_checkpoint:
935
+ checkpoint_path = None
936
+ if isinstance(training_args.resume_from_checkpoint, bool) and training_args.resume_from_checkpoint:
937
+ from transformers.trainer_utils import get_last_checkpoint
938
+ checkpoint_path = get_last_checkpoint(training_args.output_dir)
939
+ else:
940
+ checkpoint_path = training_args.resume_from_checkpoint
941
+
942
+ if checkpoint_path is not None and os.path.exists(checkpoint_path):
943
+ lora_dir = os.path.join(checkpoint_path, "lora")
944
+ if os.path.exists(lora_dir):
945
+ logger.info(f"*** Resuming custom weights (LoRA, Connectors, Head) from {lora_dir} ***")
946
+
947
+ # 1. Load LLM LoRA
948
+ if hasattr(model.model, "language_model"):
949
+ try:
950
+ from peft import load_peft_weights, set_peft_model_state_dict
951
+ adapters_weights = load_peft_weights(lora_dir)
952
+ set_peft_model_state_dict(model.model.language_model, adapters_weights)
953
+ logger.info("Successfully loaded LLM LoRA weights.")
954
+ except Exception as e:
955
+ logger.warning(f"Could not load LLM LoRA weights: {e}")
956
+
957
+ # 2. Load Diffusion Head
958
+ ph_full_path = os.path.join(lora_dir, "diffusion_head_full.bin")
959
+ if os.path.exists(ph_full_path) and hasattr(model.model, "prediction_head"):
960
+ try:
961
+ model.model.prediction_head.load_state_dict(torch.load(ph_full_path, map_location="cpu"), strict=False)
962
+ logger.info("Successfully loaded Diffusion Head weights.")
963
+ except Exception as e:
964
+ logger.warning(f"Failed to load Diffusion Head weights: {e}")
965
+
966
+ # 3. Load Acoustic Connector
967
+ ac_path = os.path.join(lora_dir, "acoustic_connector", "pytorch_model.bin")
968
+ if os.path.exists(ac_path) and hasattr(model.model, "acoustic_connector"):
969
+ try:
970
+ model.model.acoustic_connector.load_state_dict(torch.load(ac_path, map_location="cpu"))
971
+ logger.info("Successfully loaded Acoustic Connector weights.")
972
+ except Exception as e:
973
+ logger.warning(f"Failed to load Acoustic Connector weights: {e}")
974
+
975
+ # 4. Load Semantic Connector
976
+ se_path = os.path.join(lora_dir, "semantic_connector", "pytorch_model.bin")
977
+ if os.path.exists(se_path) and hasattr(model.model, "semantic_connector"):
978
+ try:
979
+ model.model.semantic_connector.load_state_dict(torch.load(se_path, map_location="cpu"))
980
+ logger.info("Successfully loaded Semantic Connector weights.")
981
+ except Exception as e:
982
+ logger.warning(f"Failed to load Semantic Connector weights: {e}")
983
+ else:
984
+ logger.warning(f"No custom 'lora' directory found inside checkpoint: {checkpoint_path}")
985
+ # =========================================================================
986
+
987
+ if training_args.do_train:
988
+ # ----- THE FIX: SET resume_from_checkpoint=False HERE -----
989
+ # The weights are ALREADY loaded via the custom block above.
990
+ # Setting this to False forces Trainer to start counting steps/epochs from 0
991
+ # for your new dataset, preventing it from immediately exiting.
992
+ trainer.train(resume_from_checkpoint=False)
993
+
994
+ lora_out = os.path.join(training_args.output_dir, "lora")
995
+ os.makedirs(lora_out, exist_ok=True)
996
+
997
+ # LLM PEFT (if any)
998
+ lm = getattr(model.model, "language_model", None)
999
+ if hasattr(lm, "save_pretrained"):
1000
+ lm.save_pretrained(lora_out)
1001
+
1002
+ # Diffusion head PEFT (if any)
1003
+ ph = getattr(model.model, "prediction_head", None)
1004
+ if hasattr(ph, "save_pretrained"):
1005
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1006
+ os.makedirs(ph_dir, exist_ok=True)
1007
+ ph.save_pretrained(ph_dir)
1008
+
1009
+ # ALWAYS: full diffusion head state_dict fallback
1010
+ try:
1011
+ if ph is not None and hasattr(ph, "state_dict"):
1012
+ sd = ph.state_dict()
1013
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
1014
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1015
+ os.makedirs(ph_dir, exist_ok=True)
1016
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
1017
+ except Exception as e:
1018
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
1019
+
1020
+ # Connectors (if trained)
1021
+ try:
1022
+ ac = getattr(model.model, "acoustic_connector", None)
1023
+ if ac is not None:
1024
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
1025
+ os.makedirs(ac_dir, exist_ok=True)
1026
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
1027
+ except Exception as e:
1028
+ logger.warning(f"Failed to save acoustic_connector: {e}")
1029
+
1030
+ try:
1031
+ se = getattr(model.model, "semantic_connector", None)
1032
+ if se is not None:
1033
+ se_dir = os.path.join(lora_out, "semantic_connector")
1034
+ os.makedirs(se_dir, exist_ok=True)
1035
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
1036
+ except Exception as e:
1037
+ logger.warning(f"Failed to save semantic_connector: {e}")
1038
+
1039
+ if training_args.do_eval and eval_dataset is not None:
1040
+ trainer.evaluate()
1041
+
1042
+
1043
+ if __name__ == "__main__":
1044
+ main()
VibeVoice-finetuning/src/finetune_vibevoice_lora105.py ADDED
@@ -0,0 +1,1044 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import os
3
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
4
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
5
+
6
+ import logging
7
+ import os
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from datasets import load_dataset, DatasetDict, VerificationMode
15
+
16
+ from transformers import (
17
+ HfArgumentParser,
18
+ Trainer,
19
+ set_seed,
20
+ TrainerCallback,
21
+ )
22
+ from transformers import TrainingArguments as HfTrainingArguments
23
+
24
+ from peft import LoraConfig, get_peft_model, TaskType
25
+
26
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
27
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
28
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
29
+
30
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ # ================== SAMPLE CALLBACK UTILS ==================
35
+
36
+ import copy
37
+ import torch
38
+ from transformers import TrainerCallback
39
+
40
class EmaCallback(TrainerCallback):
    """Keeps an exponential moving average (EMA) of one sub-module's weights.

    The shadow copy is updated after every optimizer step and is swapped into
    the model around evaluation / checkpoint saving so that the smoothed
    weights are the ones evaluated and persisted.

    NOTE(review): stock `transformers` Trainer defines no `on_evaluate_end` /
    `on_save_end` events, and `on_evaluate` / `on_save` fire AFTER the
    evaluate/save work is done — confirm these hooks behave as intended with
    the Trainer version in use. As a safety net, `on_step_end` now restores
    the live weights if a swap is still pending.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        """
        attr_path: dotted path of the tracked module under the Trainer's model
                   (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        device: device on which the shadow copy is kept
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None  # EMA state_dict (detached clones on self.device)
        self._orig = None   # stashed non-EMA weights while EMA is swapped in

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        # FIX: if a swap-back hook never fired (see class NOTE), restore the
        # live weights before accumulating, so the EMA is not averaged into
        # itself. No-op when no swap is pending.
        self._swap_back(model)
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                v = v.detach().to(self.device)
                if self.shadow[k].is_floating_point():
                    self.shadow[k].mul_(self.decay).add_(v, alpha=(1.0 - self.decay))
                else:
                    # FIX: non-floating buffers (e.g. integer counters) cannot
                    # be averaged in-place with a float decay; track directly.
                    self.shadow[k].copy_(v)

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        if self._orig is not None:
            # FIX: already swapped in; don't clobber the stashed live weights.
            return
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # use EMA during eval
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # temporarily swap to EMA, let Trainer save, then swap back
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)
100
+
101
+
102
@dataclass
class ModelArguments:
    """CLI arguments controlling model/processor loading, LoRA configuration,
    and which sub-modules of VibeVoice are trainable."""

    # Base model directory; must contain config.json (required in main()).
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    # Processor directory; main() falls back to model_name_or_path when None.
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    cache_dir: Optional[str] = field(default=None)
    # Audio tokenizers are frozen by default; main() disables their grads when True.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # LoRA hyper-parameters shared by the LLM adapter and (optionally) the head adapter.
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    # Set to "none"/"off"/"disable"/"disabled" (or empty) to skip LLM LoRA wrapping entirely.
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    # NOTE: indices refer to named_parameters() enumeration order of the head,
    # not to architectural layer numbers — see the freezing loop in main().
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
127
+
128
@dataclass
class DataArguments:
    """CLI arguments describing the training/eval data sources and columns."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names inside the dataset rows.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of data carved out for eval when no explicit eval split is given.
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
146
+
147
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """Extends HF TrainingArguments with VibeVoice-specific loss/debug knobs."""

    # Multiplier for the diffusion (DDPM) batch dimension inside the trainer.
    ddpm_batch_mul: int = field(default=1)
    # Relative weights of the two loss terms.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Verbose CE diagnostics (top-k dumps every N steps for a few examples).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
164
+
165
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA adapter configuration for the language-model blocks.

    Target module names come from the comma-separated
    ``args.lora_target_modules`` string; blank entries are ignored.
    """
    modules = [name.strip() for name in args.lora_target_modules.split(",") if name.strip()]
    cfg = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
    return cfg
175
+
176
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA configuration for the diffusion prediction head.

    Uses a fixed set of head-internal projection layers as targets and the
    feature-extraction task type (the head is not a causal LM).
    """
    head_targets = ["noisy_images_proj", "cond_proj", "gate_proj", "up_proj", "down_proj", "linear"]
    cfg = LoraConfig(
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_targets,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
    return cfg
186
+
187
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Produce next-token CE targets, masking padding and acoustic positions.

    Labels are shifted left by one (next-token prediction). A target position
    is kept only if it is attended (per ``attention_mask``) and is NOT an
    acoustic token (per ``acoustic_input_mask``); everything else becomes
    ``pad_id`` so cross-entropy ignores it.
    """
    targets = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        attended = attention_mask[:, 1:].contiguous().eq(1)
    else:
        # No mask supplied: treat every shifted position as attended.
        attended = torch.ones_like(targets, dtype=torch.bool)
    acoustic_next = acoustic_input_mask[:, 1:].contiguous()
    keep = attended & ~acoustic_next
    masked = targets.clone()
    masked[~keep] = pad_id
    return masked
195
+
196
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
    """Monkey-patch ``acoustic_tokenizer.encode`` so its output supports the
    legacy ``out[0][0]`` indexing the rest of the pipeline expects.

    The wrapper normalizes dict / object / tensor returns into ``[[value]]``.
    Best-effort: any failure to patch is logged, never raised.
    """
    try:
        # The tokenizer may live on model_obj.model or directly on model_obj.
        acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
        if acoustic is None or not hasattr(acoustic, "encode"):
            logger_.warning("No acoustic_tokenizer.encode() found to patch.")
            return
        base_encode = acoustic.encode
        def encode_wrapped(*args, **kwargs):
            out = base_encode(*args, **kwargs)
            # Probe: if out[0][0] already works, pass through unchanged.
            # NOTE(review): a bare 2-D tensor also passes this probe and is
            # returned as-is rather than wrapped — confirm that is intended.
            try:
                _ = out[0][0]
                return out
            except Exception:
                pass
            # Dict return: prefer well-known payload keys, else first value.
            if isinstance(out, dict):
                for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
                    if k in out:
                        return [[out[k]]]
                if len(out) > 0:
                    return [[next(iter(out.values()))]]
            # Object return: same well-known names as attributes.
            for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
                if hasattr(out, attr):
                    return [[getattr(out, attr)]]
            # Plain tensor (that failed the probe, e.g. 0-d/1-d): wrap it.
            try:
                if isinstance(out, torch.Tensor):
                    return [[out]]
            except Exception:
                pass
            # Last resort: wrap whatever we got.
            return [[out]]
        acoustic.encode = encode_wrapped
        logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
    except Exception as e:
        logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
229
+
230
+ def main() -> None:
231
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
232
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
233
+
234
+ logging.basicConfig(
235
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
236
+ datefmt="%m/%d/%Y %H:%M:%S",
237
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
238
+ )
239
+ logger.info("Training/evaluation parameters %s", training_args)
240
+ set_seed(training_args.seed)
241
+
242
+ # Configure gradient clipping
243
+ if not getattr(training_args, "gradient_clipping", False):
244
+ if hasattr(training_args, "max_grad_norm"):
245
+ training_args.max_grad_norm = 0.0
246
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
247
+ else:
248
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
249
+ training_args.max_grad_norm = 1.0
250
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
251
+
252
+ # Load processor
253
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
254
+ if processor_path is None:
255
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
256
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
257
+
258
+ # Required special tokens
259
+ tok = processor.tokenizer
260
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
261
+ if not hasattr(tok, required) or getattr(tok, required) is None:
262
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
263
+
264
+ # Load model
265
+ if model_args.model_name_or_path is None:
266
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
267
+ dtype = torch.float32
268
+ if training_args.bf16:
269
+ dtype = torch.bfloat16
270
+ elif getattr(training_args, "fp16", False):
271
+ dtype = torch.float16
272
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
273
+ model_args.model_name_or_path,
274
+ torch_dtype=dtype, device_map={"": 0},
275
+ )
276
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
277
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
278
+
279
+ # Diagnostics: LM head tie
280
+ try:
281
+ in_emb_mod = model.get_input_embeddings()
282
+ out_emb_mod = model.get_output_embeddings()
283
+ in_w = getattr(in_emb_mod, "weight", None)
284
+ out_w = getattr(out_emb_mod, "weight", None)
285
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
286
+ values_equal = False
287
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
288
+ try:
289
+ values_equal = bool(torch.allclose(in_w, out_w))
290
+ except Exception:
291
+ values_equal = False
292
+ try:
293
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
294
+ except Exception:
295
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
296
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
297
+ if out_w is not None:
298
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
299
+ except Exception as e:
300
+ logger.warning(f"LM head tie diagnostics failed: {e}")
301
+
302
+ # Hard-tie LM head
303
+ try:
304
+ emb_module = model.get_input_embeddings()
305
+ head_module = model.get_output_embeddings()
306
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
307
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
308
+ with torch.no_grad():
309
+ head_module.weight = emb_module.weight
310
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
311
+ except Exception as e:
312
+ logger.warning(f"Force-tie of LM head failed: {e}")
313
+
314
+ # Validate special IDs (info logs only)
315
+ try:
316
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
317
+ try:
318
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
319
+ except Exception:
320
+ vocab_size = 0
321
+ in_emb_mod = model.get_input_embeddings()
322
+ out_emb_mod = model.get_output_embeddings()
323
+ in_w = getattr(in_emb_mod, "weight", None)
324
+ out_w = getattr(out_emb_mod, "weight", None)
325
+ for name in special_names:
326
+ val = getattr(tok, name, None)
327
+ exists = (val is not None)
328
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
329
+ equal_row = None
330
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
331
+ try:
332
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
333
+ except Exception:
334
+ equal_row = False
335
+ decoded_str = None
336
+ if exists and isinstance(val, int):
337
+ try:
338
+ decoded_str = tok.decode([val])
339
+ except Exception:
340
+ try:
341
+ decoded_str = tok.convert_ids_to_tokens(val)
342
+ except Exception:
343
+ decoded_str = "<decode_failed>"
344
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
345
+ except Exception as e:
346
+ logger.warning(f"Special token ID/row validation failed: {e}")
347
+
348
+ # Quick tokenizer diagnostics (optional)
349
+ try:
350
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
351
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
352
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
353
+ # tiny CE smoke test
354
+ with torch.no_grad():
355
+ simple_text = "The cat sat on the mat."
356
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
357
+ simple_mask = torch.ones_like(simple_ids)
358
+ x = model.get_input_embeddings()(simple_ids)
359
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
360
+ logits = model.lm_head(outputs.last_hidden_state)
361
+ shift_logits = logits[:, :-1, :].contiguous()
362
+ shift_labels = simple_ids[:, 1:].contiguous()
363
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
364
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
365
+ except Exception as e:
366
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
367
+
368
+ # Disable cache during training
369
+ if hasattr(model.config, "use_cache") and training_args.do_train:
370
+ model.config.use_cache = False
371
+
372
+ # Freeze tokenizers
373
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
374
+ for p in model.model.acoustic_tokenizer.parameters():
375
+ p.requires_grad = False
376
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
377
+ for p in model.model.semantic_tokenizer.parameters():
378
+ p.requires_grad = False
379
+
380
+ # LoRA wrap LLM (optional)
381
+ lora_cfg = build_lora_config(model_args)
382
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
383
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
384
+ if not skip_lm_lora:
385
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
386
+ else:
387
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
388
+
389
+ try:
390
+ model.tie_weights()
391
+ except Exception:
392
+ pass
393
+
394
+ # Freeze all then enable trainable subsets
395
+ for _, p in model.named_parameters():
396
+ p.requires_grad = False
397
+
398
+ try:
399
+ for n, p in model.model.language_model.named_parameters():
400
+ if "lora_A" in n or "lora_B" in n:
401
+ p.requires_grad = True
402
+ except Exception:
403
+ logger.warning("Could not re-enable LoRA params on language_model.")
404
+
405
+ # Diffusion head LoRA wrapping (optional)
406
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
407
+ class _HeadForwardShim(nn.Module):
408
+ def __init__(self, base: nn.Module): super().__init__(); self.base = base
409
+ def forward(self, *args, **kwargs):
410
+ if len(args) >= 3:
411
+ noisy_images, timesteps, condition = args[:3]
412
+ else:
413
+ noisy_images = kwargs.get("noisy_images")
414
+ timesteps = kwargs.get("timesteps")
415
+ condition = kwargs.get("condition")
416
+ return self.base(noisy_images, timesteps, condition)
417
+ try:
418
+ shim = _HeadForwardShim(model.model.prediction_head)
419
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
420
+ for n, p in model.model.prediction_head.named_parameters():
421
+ if "lora_A" in n or "lora_B" in n:
422
+ p.requires_grad = True
423
+ except Exception as e:
424
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
425
+
426
+ # Train full diffusion head (optional)
427
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
428
+ for p in model.model.prediction_head.parameters():
429
+ p.requires_grad = True
430
+
431
+ # Freeze diffusion head layers (optional)
432
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
433
+ head_params = list(model.model.prediction_head.named_parameters())
434
+ try:
435
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
436
+ frozen_count = 0
437
+ for i, (name, param) in enumerate(head_params):
438
+ if i in indices_to_freeze:
439
+ param.requires_grad = False
440
+ frozen_count += 1
441
+ logger.info(f"Froze layer [{i}]: {name}")
442
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
443
+ except Exception as e:
444
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
445
+ raise
446
+
447
+ # Connectors
448
+ if getattr(model_args, "train_connectors", False):
449
+ if hasattr(model.model, "acoustic_connector"):
450
+ for p in model.model.acoustic_connector.parameters():
451
+ p.requires_grad = True
452
+ if hasattr(model.model, "semantic_connector"):
453
+ for p in model.model.semantic_connector.parameters():
454
+ p.requires_grad = True
455
+ else:
456
+ if hasattr(model.model, "acoustic_connector"):
457
+ for p in model.model.acoustic_connector.parameters():
458
+ p.requires_grad = False
459
+ if hasattr(model.model, "semantic_connector"):
460
+ for p in model.model.semantic_connector.parameters():
461
+ p.requires_grad = False
462
+
463
+ # Freeze embedding + head
464
+ try:
465
+ emb = model.get_input_embeddings()
466
+ if hasattr(emb, "weight"):
467
+ emb.weight.requires_grad_(False)
468
+ head = model.get_output_embeddings()
469
+ if head is not None and hasattr(head, "weight"):
470
+ head.weight.requires_grad_(False)
471
+ except Exception:
472
+ pass
473
+
474
+ # Diagnostics
475
+ def _sum_params(named_iter):
476
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
477
+ try:
478
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
479
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
480
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
481
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
482
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
483
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
484
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
485
+ except Exception:
486
+ pass
487
+
488
+ # Preprocessed data classes
489
class PreprocessedBatchDataset:
    """Dataset over batches that were pre-collated and serialized with ``torch.save``.

    The file is expected to contain a list of dicts; each dict is a ready-made
    model batch (tensors plus possibly non-tensor metadata).
    """

    def __init__(self, preprocessed_file: str):
        # NOTE(review): torch.load with default settings unpickles arbitrary
        # objects -- only load files produced by this pipeline / trusted sources.
        self.data = torch.load(preprocessed_file, map_location='cpu')
        logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        # The original implementation branched on isinstance(v, torch.Tensor)
        # but both branches did the same assignment; a shallow dict copy is
        # exactly equivalent and lets callers mutate the mapping without
        # touching the cached batch.
        return dict(self.data[idx])
506
+
507
class PreprocessedBatchSubset:
    """Index-selected view over a PreprocessedBatchDataset (no data copied)."""

    def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
        self.dataset = dataset
        self.indices = indices

    def __len__(self) -> int:
        return len(self.indices)

    def __getitem__(self, idx: int):
        # Translate the subset-local position into the backing dataset's index.
        return self.dataset[self.indices[idx]]
518
+
519
class PreprocessedBatchCollator:
    """Merges pre-collated batch dicts by concatenating tensors along dim 0.

    Keys are taken from the first batch element. ``None`` values are skipped;
    for non-tensor values the first non-None entry wins, and a key whose values
    are all ``None`` maps to ``None``.
    """

    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        merged: Dict[str, Any] = {}
        for key in batch[0]:
            values = [item[key] for item in batch if item[key] is not None]
            if not values:
                merged[key] = None
            elif isinstance(values[0], torch.Tensor):
                merged[key] = torch.cat(values, dim=0)
            else:
                merged[key] = values[0]
        return merged
531
+
532
+ # Datasets
533
+ preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
534
+ preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
535
+
536
+ if os.path.exists(preprocessed_file):
537
+ logger.info(f"Loading preprocessed data from {preprocessed_file}")
538
+ preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
539
+
540
+ train_dataset = preprocessed_data
541
+ eval_dataset = None
542
+
543
+ if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
544
+ num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
545
+ num_train = len(preprocessed_data) - num_eval
546
+ indices = list(range(len(preprocessed_data)))
547
+ import random
548
+ random.Random(training_args.seed).shuffle(indices)
549
+ train_indices = indices[:num_train]
550
+ eval_indices = indices[num_train:]
551
+ train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
552
+ eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
553
+ else:
554
+ logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
555
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
556
+ if data_args.train_jsonl is not None:
557
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
558
+ if data_args.validation_jsonl is not None:
559
+ data_files["validation"] = data_args.validation_jsonl
560
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
561
+ else:
562
+ if data_args.dataset_name is None:
563
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
564
+ raw = load_dataset(
565
+ data_args.dataset_name,
566
+ data_args.dataset_config_name,
567
+ verification_mode=verification_mode,
568
+ cache_dir=model_args.cache_dir,
569
+ )
570
+ train_ds = raw[data_args.train_split_name]
571
+ eval_ds = None
572
+ if training_args.do_eval:
573
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
574
+ eval_ds = raw[data_args.eval_split_name]
575
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
576
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
577
+ train_ds, eval_ds = split["train"], split["test"]
578
+
579
+ train_dataset = VibeVoiceDataset(
580
+ train_ds,
581
+ text_column=data_args.text_column_name,
582
+ audio_column=data_args.audio_column_name,
583
+ voice_prompts_column=data_args.voice_prompts_column_name,
584
+ )
585
+ eval_dataset = None
586
+ if eval_ds is not None:
587
+ eval_dataset = VibeVoiceDataset(
588
+ eval_ds,
589
+ text_column=data_args.text_column_name,
590
+ audio_column=data_args.audio_column_name,
591
+ voice_prompts_column=data_args.voice_prompts_column_name,
592
+ )
593
+
594
+ # Ratios/dims from processor+model
595
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
596
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
597
+ if semantic_dim is None:
598
+ try:
599
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
600
+ except Exception:
601
+ semantic_dim = 128
602
+
603
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
604
+
605
+ if os.path.exists(preprocessed_file):
606
+ data_collator = PreprocessedBatchCollator()
607
+ else:
608
+ data_collator = VibeVoiceCollator(
609
+ processor=processor,
610
+ max_length=data_args.max_length,
611
+ speech_compress_ratio=speech_compress_ratio,
612
+ semantic_vae_dim=semantic_dim,
613
+ compute_semantics=compute_semantics_flag,
614
+ debug_checks=False,
615
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
616
+ )
617
+
618
class LoRADebugCallback(TrainerCallback):
    """Trainer callback that verifies LoRA adapter weights are actually updating.

    At train begin it snapshots the L2 norm of every parameter whose name
    contains ``lora_A`` or ``lora_B``; on selected steps it re-measures the
    norms and logs how many changed. This surfaces silently-frozen adapters,
    a zero learning rate, or misconfigured ``lora_target_modules``.
    All failures are logged and swallowed so diagnostics never abort training.
    """

    def __init__(self, log_every_n_steps: int = 50):
        # Clamp to >= 1 so the modulo check in on_step_end never divides by zero.
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        # Last observed norm per LoRA parameter name.
        self.prev_param_norms: Dict[str, float] = {}
        self.lora_param_names: List[str] = []

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        """Collect LoRA parameter names and record their initial norms."""
        try:
            if model is None:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
            for n in self.lora_param_names:
                p = named[n]
                self.prev_param_norms[n] = float(p.data.norm().item())
            total = len(self.lora_param_names)
            req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
            num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            # PEFT typically zero-initializes lora_B, so an all-zero count here
            # is expected at step 0 -- it should shrink once training starts.
            zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        """Every N steps (and on step 1), report how many LoRA norms changed."""
        try:
            if model is None or len(self.lora_param_names) == 0:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Always log on the very first step so a dead optimizer is caught early.
            if step % self.log_every_n_steps != 0 and step != 1:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            changed_A = 0
            changed_B = 0
            zero_B = 0
            # Norm comparison uses a tiny epsilon: any real optimizer step moves
            # the norm by far more than 1e-12.
            eps = 1e-12
            for n in self.lora_param_names:
                p = named.get(n, None)
                if p is None:
                    continue
                prev = self.prev_param_norms.get(n, 0.0)
                curr = float(p.data.norm().item())
                if "lora_A" in n and abs(curr - prev) > eps:
                    changed_A += 1
                if "lora_B" in n:
                    if abs(curr - prev) > eps:
                        changed_B += 1
                    if curr == 0.0:
                        zero_B += 1
                # Roll the snapshot forward so the next report is incremental.
                self.prev_param_norms[n] = curr
            total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
677
+
678
class VibeVoiceTrainer(Trainer):
    """Custom Trainer combining CE (text) loss with the diffusion (acoustic) loss.

    NOTE(review): this class closes over ``training_args``, ``mask_for_ce`` and
    ``logger`` from the enclosing scope rather than reading ``self.args``.
    """

    def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
        """Return ce_loss_weight * CE + diffusion_loss_weight * diffusion loss."""
        labels = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        acoustic_input_mask = inputs.get("acoustic_input_mask")

        # Ensure semantic tensors exist and match the semantic connector's dtype;
        # fall back to the embedding dtype if the connector is absent.
        sem = inputs.get("speech_semantic_tensors", None)
        try:
            target_dtype = next(model.model.semantic_connector.parameters()).dtype
        except Exception:
            target_dtype = model.get_input_embeddings().weight.dtype

        if sem is None:
            # No semantic features supplied: substitute zeros shaped like the
            # speech mask so the forward pass still receives a tensor.
            sm = inputs.get("speech_masks")
            if sm is not None:
                zeros = torch.zeros(
                    sm.size(0), sm.size(1),
                    getattr(model.config, "semantic_vae_dim", 128),
                    dtype=target_dtype,
                    device=sm.device,
                )
                inputs["speech_semantic_tensors"] = zeros
        else:
            if isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

        outputs = model(
            input_ids=inputs.get("input_ids"),
            attention_mask=attention_mask,
            speech_tensors=inputs.get("speech_tensors"),
            speech_masks=inputs.get("speech_masks"),
            speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
            acoustic_input_mask=acoustic_input_mask,
            acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
            speeches_loss_input=inputs.get("speeches_loss_input"),
            ddpm_batch_mul=training_args.ddpm_batch_mul,
        )

        # Invariants: token/latent selection equality across views (warn, don't assert)
        try:
            al_mask = inputs.get("acoustic_loss_mask")
            sp_masks = inputs.get("speech_masks")
            sp_loss_sel = inputs.get("speeches_loss_input")
            num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
            num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
            num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
            num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
            self.log({
                "debug/num_tok_total": float(num_tok_total),
                "debug/num_tok_loss": float(num_tok_loss),
                "debug/num_lat_total": float(num_lat_total),
                "debug/num_lat_loss": float(num_lat_loss),
            })
            if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                # The number of acoustic tokens selected for loss must equal the
                # number of speech latents selected for loss, or supervision is misaligned.
                if num_tok_loss != num_lat_loss:
                    logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
        except Exception:
            pass

        # CE Loss. NOTE(review): logits are shifted here ([:, :-1]) but labels
        # are not -- this assumes mask_for_ce already returns next-token labels
        # of length T-1 aligned with shift_logits; confirm against mask_for_ce.
        logits = outputs.logits
        ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
        shift_logits = logits[:, :-1, :].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
        ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

        # Optional CE diagnostics (no-op unless --debug_ce_details is set).
        try:
            self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
        except Exception as e:
            logger.warning(f"Failed invoking CE debug: {e}")

        # Diffusion loss (zero when the model produced none for this batch).
        diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
        total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

        # Logs (best-effort; never fail training because of logging).
        try:
            prefix = "train" if model.training else "eval"
            self.log({
                f"{prefix}/ce_loss": ce_loss.detach().item(),
                f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
            })
            if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                lr_val = self.optimizer.param_groups[0].get("lr", None)
                if lr_val is not None:
                    self.log({"train/learning_rate_real": float(lr_val)})
        except Exception:
            pass

        return (total, outputs) if return_outputs else total

    def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
        """Log per-token / per-example CE statistics on step 0/1 and every N steps.

        Gated by training_args.debug_ce_details; never raises.
        """
        try:
            if not getattr(training_args, "debug_ce_details", False):
                return
            step = int(getattr(self.state, "global_step", 0) or 0)
            every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
            if not (step <= 1 or (step % every_n == 0)):
                return

            with torch.no_grad():
                vocab = shift_logits.size(-1)
                # Unreduced CE so we can aggregate per token and per example.
                per_token_loss = F.cross_entropy(
                    shift_logits.view(-1, vocab),
                    ce_labels.view(-1),
                    reduction="none",
                    ignore_index=-100,
                ).view_as(ce_labels)

                valid_mask = ce_labels.ne(-100)
                num_valid = int(valid_mask.sum().item())
                avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                per_ex_avgs = []
                max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                B = ce_labels.size(0)
                for b in range(min(B, max_examples)):
                    vb = valid_mask[b]
                    if int(vb.sum().item()) > 0:
                        per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                    else:
                        # Example contributed no tokens to the loss.
                        per_ex_avgs.append(float("nan"))
                # x==x filters NaN (NaN != NaN) so the log shows None instead.
                logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
        except Exception as e:
            logger.warning(f"CE detailed debug failed: {e}")

    # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------


    def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
        """Save LoRA adapters, diffusion head and connectors under <dir>/lora.

        NOTE(review): this override never calls super()._save(), so the standard
        full-model checkpoint files are NOT written -- only the assets below.
        Any failure is logged and swallowed, so a checkpoint can silently be
        incomplete; confirm this is intentional.
        """
        try:
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)

            # --- LLM PEFT adapters (if LoRA-wrapped) ---
            language_model = getattr(self.model.model, "language_model", None)
            if hasattr(language_model, "save_pretrained"):
                language_model.save_pretrained(lora_out)

            # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
            pred_head = getattr(self.model.model, "prediction_head", None)
            if hasattr(pred_head, "save_pretrained"):
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                pred_head.save_pretrained(ph_dir)

            # --- ALWAYS save FULL diffusion head state_dict for fallback ---
            # Written twice (top level and diffusion_head/) so downstream
            # loaders can find it in either location.
            if pred_head is not None and hasattr(pred_head, "state_dict"):
                sd = pred_head.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

            # --- Connectors (plain state_dicts) ---
            ac = getattr(self.model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

            se = getattr(self.model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

        except Exception as e:
            logger.warning(f"Failed to save LoRA assets: {e}")
850
+
851
+
852
+ # ------------- Build the Trainer -------------
853
+
854
+ # Resolve which adapters to apply in samples
855
+
856
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
857
+
858
+ # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
859
+ # This prevents 'ValueError: Attempting to unscale FP16 gradients'
860
+ if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
861
+ print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
862
+ for name, param in model.named_parameters():
863
+ if param.requires_grad:
864
+ param.data = param.data.to(torch.float32)
865
+ # ---------------------------------------------------
866
+
867
+ trainer = VibeVoiceTrainer(
868
+ model=model,
869
+ args=training_args,
870
+ train_dataset=train_dataset,
871
+ eval_dataset=eval_dataset,
872
+ data_collator=data_collator,
873
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
874
+ )
875
+
876
+ # Optional debug pre-training save
877
+ if getattr(training_args, "debug_save", False):
878
+ try:
879
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
880
+ lora_out = os.path.join(debug_dir, "lora")
881
+ os.makedirs(lora_out, exist_ok=True)
882
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
883
+ # language model adapters / base
884
+ try:
885
+ if hasattr(model.model.language_model, "save_pretrained"):
886
+ model.model.language_model.save_pretrained(lora_out)
887
+ except Exception as e_lm:
888
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
889
+ # diffusion head
890
+ try:
891
+ if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
892
+ model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
893
+ except Exception as e_head:
894
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
895
+ # NEW: full diffusion head state_dict as fallback
896
+ try:
897
+ ph = getattr(model.model, "prediction_head", None)
898
+ if ph is not None and hasattr(ph, "state_dict"):
899
+ sd = ph.state_dict()
900
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
901
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
902
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
903
+ except Exception as e:
904
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
905
+ # connectors
906
+ try:
907
+ ac_conn = getattr(model.model, "acoustic_connector", None)
908
+ if ac_conn is not None:
909
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
910
+ os.makedirs(ac_dir, exist_ok=True)
911
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
912
+ except Exception as e_ac:
913
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
914
+ try:
915
+ se_conn = getattr(model.model, "semantic_connector", None)
916
+ if se_conn is not None:
917
+ se_dir = os.path.join(lora_out, "semantic_connector")
918
+ os.makedirs(se_dir, exist_ok=True)
919
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
920
+ except Exception as e_se:
921
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
922
+ except Exception as e:
923
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
924
+
925
+ if getattr(training_args, "gradient_checkpointing", False):
926
+ try:
927
+ model.gradient_checkpointing_enable()
928
+ except Exception:
929
+ logger.warning("Failed to enable gradient checkpointing on the model.")
930
+
931
+ # =========================================================================
932
+ # Load Custom Weights from Checkpoint before resuming training
933
+ # =========================================================================
934
+ if training_args.do_train and training_args.resume_from_checkpoint:
935
+ checkpoint_path = None
936
+ if isinstance(training_args.resume_from_checkpoint, bool) and training_args.resume_from_checkpoint:
937
+ from transformers.trainer_utils import get_last_checkpoint
938
+ checkpoint_path = get_last_checkpoint(training_args.output_dir)
939
+ else:
940
+ checkpoint_path = training_args.resume_from_checkpoint
941
+
942
+ if checkpoint_path is not None and os.path.exists(checkpoint_path):
943
+ lora_dir = os.path.join(checkpoint_path, "lora")
944
+ if os.path.exists(lora_dir):
945
+ logger.info(f"*** Resuming custom weights (LoRA, Connectors, Head) from {lora_dir} ***")
946
+
947
+ # 1. Load LLM LoRA
948
+ if hasattr(model.model, "language_model"):
949
+ try:
950
+ from peft import load_peft_weights, set_peft_model_state_dict
951
+ adapters_weights = load_peft_weights(lora_dir)
952
+ set_peft_model_state_dict(model.model.language_model, adapters_weights)
953
+ logger.info("Successfully loaded LLM LoRA weights.")
954
+ except Exception as e:
955
+ logger.warning(f"Could not load LLM LoRA weights: {e}")
956
+
957
+ # 2. Load Diffusion Head
958
+ ph_full_path = os.path.join(lora_dir, "diffusion_head_full.bin")
959
+ if os.path.exists(ph_full_path) and hasattr(model.model, "prediction_head"):
960
+ try:
961
+ model.model.prediction_head.load_state_dict(torch.load(ph_full_path, map_location="cpu"), strict=False)
962
+ logger.info("Successfully loaded Diffusion Head weights.")
963
+ except Exception as e:
964
+ logger.warning(f"Failed to load Diffusion Head weights: {e}")
965
+
966
+ # 3. Load Acoustic Connector
967
+ ac_path = os.path.join(lora_dir, "acoustic_connector", "pytorch_model.bin")
968
+ if os.path.exists(ac_path) and hasattr(model.model, "acoustic_connector"):
969
+ try:
970
+ model.model.acoustic_connector.load_state_dict(torch.load(ac_path, map_location="cpu"))
971
+ logger.info("Successfully loaded Acoustic Connector weights.")
972
+ except Exception as e:
973
+ logger.warning(f"Failed to load Acoustic Connector weights: {e}")
974
+
975
+ # 4. Load Semantic Connector
976
+ se_path = os.path.join(lora_dir, "semantic_connector", "pytorch_model.bin")
977
+ if os.path.exists(se_path) and hasattr(model.model, "semantic_connector"):
978
+ try:
979
+ model.model.semantic_connector.load_state_dict(torch.load(se_path, map_location="cpu"))
980
+ logger.info("Successfully loaded Semantic Connector weights.")
981
+ except Exception as e:
982
+ logger.warning(f"Failed to load Semantic Connector weights: {e}")
983
+ else:
984
+ logger.warning(f"No custom 'lora' directory found inside checkpoint: {checkpoint_path}")
985
+ # =========================================================================
986
+
987
+ if training_args.do_train:
988
+ # ----- THE FIX: SET resume_from_checkpoint=False HERE -----
989
+ # The weights are ALREADY loaded via the custom block above.
990
+ # Setting this to False forces Trainer to start counting steps/epochs from 0
991
+ # for your new dataset, preventing it from immediately exiting.
992
+ trainer.train(resume_from_checkpoint=False)
993
+
994
+ lora_out = os.path.join(training_args.output_dir, "lora")
995
+ os.makedirs(lora_out, exist_ok=True)
996
+
997
+ # LLM PEFT (if any)
998
+ lm = getattr(model.model, "language_model", None)
999
+ if hasattr(lm, "save_pretrained"):
1000
+ lm.save_pretrained(lora_out)
1001
+
1002
+ # Diffusion head PEFT (if any)
1003
+ ph = getattr(model.model, "prediction_head", None)
1004
+ if hasattr(ph, "save_pretrained"):
1005
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1006
+ os.makedirs(ph_dir, exist_ok=True)
1007
+ ph.save_pretrained(ph_dir)
1008
+
1009
+ # ALWAYS: full diffusion head state_dict fallback
1010
+ try:
1011
+ if ph is not None and hasattr(ph, "state_dict"):
1012
+ sd = ph.state_dict()
1013
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
1014
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1015
+ os.makedirs(ph_dir, exist_ok=True)
1016
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
1017
+ except Exception as e:
1018
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
1019
+
1020
+ # Connectors (if trained)
1021
+ try:
1022
+ ac = getattr(model.model, "acoustic_connector", None)
1023
+ if ac is not None:
1024
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
1025
+ os.makedirs(ac_dir, exist_ok=True)
1026
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
1027
+ except Exception as e:
1028
+ logger.warning(f"Failed to save acoustic_connector: {e}")
1029
+
1030
+ try:
1031
+ se = getattr(model.model, "semantic_connector", None)
1032
+ if se is not None:
1033
+ se_dir = os.path.join(lora_out, "semantic_connector")
1034
+ os.makedirs(se_dir, exist_ok=True)
1035
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
1036
+ except Exception as e:
1037
+ logger.warning(f"Failed to save semantic_connector: {e}")
1038
+
1039
+ if training_args.do_eval and eval_dataset is not None:
1040
+ trainer.evaluate()
1041
+
1042
+
1043
+ if __name__ == "__main__":
1044
+ main()
VibeVoice-finetuning/src/finetune_vibevoice_lora120.py ADDED
@@ -0,0 +1,1072 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # train_vibevoice_lora.py
2
+ import os
3
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
4
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
5
+
6
+ import logging
7
+ import os
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from datasets import load_dataset, DatasetDict, VerificationMode
15
+
16
+ from transformers import (
17
+ HfArgumentParser,
18
+ Trainer,
19
+ set_seed,
20
+ TrainerCallback,
21
+ BitsAndBytesConfig,
22
+ )
23
+ from transformers import TrainingArguments as HfTrainingArguments
24
+
25
+ from peft import LoraConfig, get_peft_model, TaskType, prepare_model_for_kbit_training
26
+
27
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
28
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
29
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
30
+
31
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+ # ================== SAMPLE CALLBACK UTILS ==================
36
+
37
+ import copy
38
+ import torch
39
+ from transformers import TrainerCallback
40
+
41
class EmaCallback(TrainerCallback):
    """Maintains an exponential moving average (EMA) of the diffusion head weights.

    The EMA ("shadow") copy is updated after every optimizer step and swapped
    into the model around evaluation/saving so that checkpoints and metrics use
    the smoothed weights.

    NOTE(review): ``on_evaluate_end`` and ``on_save_end`` do not look like
    standard transformers TrainerCallback events -- if they are never invoked,
    ``_swap_back`` never runs after an evaluate/save and training would continue
    on the EMA weights. Verify against the installed transformers version.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None  # EMA copy of the head's state_dict
        self._orig = None  # store non-EMA weights when we swap

    def _get_module(self, model):
        # Resolve dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Seed the shadow with the head's current weights (detached clones on self.device).
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        # shadow <- decay * shadow + (1 - decay) * current, after each step.
        if self.shadow is None: return
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        # Stash the live weights, then load the EMA copy into the head.
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        # Restore the stashed non-EMA weights (no-op if nothing was swapped).
        if self._orig is None: return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # use EMA during eval
        # NOTE(review): in transformers this hook may fire AFTER metrics are
        # computed, in which case the swap does not affect eval -- confirm.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # temporarily swap to EMA, let Trainer save, then swap back
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)
101
+
102
+
103
@dataclass
class ModelArguments:
    """CLI arguments selecting the base model/processor and which submodules
    are LoRA-wrapped, fully fine-tuned, or frozen."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    cache_dir: Optional[str] = field(default=None)
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # LoRA hyperparameters for the LLM blocks.
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
    load_in_4bit: bool = field(
        default=False,
        metadata={"help": "Load the base model in 4-bit quantization (QLoRA) to save VRAM."}
    )
133
@dataclass
class DataArguments:
    """CLI arguments describing where the training data comes from."""

    # HF datasets name; alternatively use --train_jsonl for local JSONL files.
    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names inside the dataset rows.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of train carved off for eval when no eval split exists (0 = none).
    eval_split_size: float = field(default=0.0)
    # Skip datasets verification checks when loading.
    ignore_verifications: bool = field(default=False)
    # Max tokenized length forwarded to the collator (None = no truncation cap here).
    max_length: Optional[int] = field(default=None)
    # Local JSONL alternatives to --dataset_name.
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    # Probability of dropping the conditioning voice prompt per example.
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
151
+
152
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice-specific knobs."""

    # Multiplier for how many noise samples the diffusion loss draws per latent.
    ddpm_batch_mul: int = field(default=1)
    # Weights combining the two loss terms: total = ce_w * CE + diff_w * diffusion.
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Periodic CE diagnostics (see VibeVoiceTrainer._debug_ce).
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    # When False, main() forces max_grad_norm=0.0 to disable clipping entirely.
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
169
+
170
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Create the PEFT LoRA config applied to the LLM decoder blocks.

    Target module names come from the comma-separated
    ``args.lora_target_modules`` string; surrounding whitespace is stripped
    and empty entries are discarded.
    """
    modules = [
        name
        for name in map(str.strip, args.lora_target_modules.split(","))
        if name
    ]
    return LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
180
+
181
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Create the PEFT LoRA config for the diffusion prediction head.

    Uses a fixed set of projection-layer names inside the head and the
    FEATURE_EXTRACTION task type (the head is not a causal LM).
    """
    head_modules = [
        "noisy_images_proj",
        "cond_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
        "linear",
    ]
    return LoraConfig(
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_modules,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
191
+
192
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Build next-token CE targets, masking padding and acoustic positions.

    Labels are shifted left by one (next-token prediction). A target position
    survives only when it is attended (``attention_mask == 1``) and the
    predicted token is not an acoustic token; everything else is replaced by
    ``pad_id`` so ``CrossEntropyLoss(ignore_index=pad_id)`` skips it.
    When ``attention_mask`` is missing or empty, all positions count as
    attended.
    """
    targets = labels[:, 1:].contiguous()
    if attention_mask is not None and attention_mask.numel() > 0:
        attended = attention_mask[:, 1:].contiguous().eq(1)
    else:
        attended = torch.ones_like(targets, dtype=torch.bool)
    acoustic_next = acoustic_input_mask[:, 1:].contiguous()
    keep = attended & ~acoustic_next
    masked = targets.clone()
    masked[~keep] = pad_id
    return masked
200
+
201
+ def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
202
+ try:
203
+ acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
204
+ if acoustic is None or not hasattr(acoustic, "encode"):
205
+ logger_.warning("No acoustic_tokenizer.encode() found to patch.")
206
+ return
207
+ base_encode = acoustic.encode
208
+ def encode_wrapped(*args, **kwargs):
209
+ out = base_encode(*args, **kwargs)
210
+ try:
211
+ _ = out[0][0]
212
+ return out
213
+ except Exception:
214
+ pass
215
+ if isinstance(out, dict):
216
+ for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
217
+ if k in out:
218
+ return [[out[k]]]
219
+ if len(out) > 0:
220
+ return [[next(iter(out.values()))]]
221
+ for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
222
+ if hasattr(out, attr):
223
+ return [[getattr(out, attr)]]
224
+ try:
225
+ if isinstance(out, torch.Tensor):
226
+ return [[out]]
227
+ except Exception:
228
+ pass
229
+ return [[out]]
230
+ acoustic.encode = encode_wrapped
231
+ logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
232
+ except Exception as e:
233
+ logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
234
+
235
+ def main() -> None:
236
+ parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
237
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
238
+
239
+ logging.basicConfig(
240
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
241
+ datefmt="%m/%d/%Y %H:%M:%S",
242
+ level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
243
+ )
244
+ logger.info("Training/evaluation parameters %s", training_args)
245
+ set_seed(training_args.seed)
246
+
247
+ # Configure gradient clipping
248
+ if not getattr(training_args, "gradient_clipping", False):
249
+ if hasattr(training_args, "max_grad_norm"):
250
+ training_args.max_grad_norm = 0.0
251
+ logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
252
+ else:
253
+ if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
254
+ training_args.max_grad_norm = 1.0
255
+ logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
256
+
257
+ # Load processor
258
+ processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
259
+ if processor_path is None:
260
+ raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
261
+ processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
262
+
263
+ # Required special tokens
264
+ tok = processor.tokenizer
265
+ for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
266
+ if not hasattr(tok, required) or getattr(tok, required) is None:
267
+ raise RuntimeError(f"Tokenizer missing required special id: {required}")
268
+
269
+ # Set dtype
270
+ dtype = torch.float32
271
+ if training_args.bf16:
272
+ dtype = torch.bfloat16
273
+ elif getattr(training_args, "fp16", False):
274
+ dtype = torch.float16
275
+
276
+ # =========================================================================
277
+ # 4-BIT QUANTIZATION CONFIGURATION (QLoRA)
278
+ # =========================================================================
279
+ quantization_config = None
280
+ if getattr(model_args, "load_in_4bit", False):
281
+ logger.info(">>> Loading base model in 4-bit mode (BitsAndBytes NF4) <<<")
282
+ quantization_config = BitsAndBytesConfig(
283
+ load_in_4bit=True,
284
+ bnb_4bit_compute_dtype=dtype, # Uses bf16/fp16 for compute
285
+ bnb_4bit_quant_type="nf4", # Recommended for QLoRA
286
+ bnb_4bit_use_double_quant=True # Double quantization saves more memory
287
+ )
288
+
289
+ if model_args.model_name_or_path is None:
290
+ raise ValueError("--model_name_or_path is required to load VibeVoice base model")
291
+
292
+ # Load model
293
+ model = VibeVoiceForConditionalGeneration.from_pretrained(
294
+ model_args.model_name_or_path,
295
+ torch_dtype=dtype,
296
+ device_map={"": 0},
297
+ quantization_config=quantization_config,
298
+ )
299
+
300
+ # Prepare model for 4-bit training if enabled
301
+ if getattr(model_args, "load_in_4bit", False):
302
+ model = prepare_model_for_kbit_training(
303
+ model,
304
+ use_gradient_checkpointing=getattr(training_args, "gradient_checkpointing", False)
305
+ )
306
+ # =========================================================================
307
+
308
+ _patch_acoustic_encode_for_legacy_indexing(model, logger)
309
+ processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
310
+
311
+ # Diagnostics: LM head tie
312
+ try:
313
+ in_emb_mod = model.get_input_embeddings()
314
+ out_emb_mod = model.get_output_embeddings()
315
+ in_w = getattr(in_emb_mod, "weight", None)
316
+ out_w = getattr(out_emb_mod, "weight", None)
317
+ shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
318
+ values_equal = False
319
+ if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
320
+ try:
321
+ values_equal = bool(torch.allclose(in_w, out_w))
322
+ except Exception:
323
+ values_equal = False
324
+ try:
325
+ tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
326
+ except Exception:
327
+ tie_cfg = getattr(model.config, "tie_word_embeddings", None)
328
+ logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
329
+ if out_w is not None:
330
+ logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
331
+ except Exception as e:
332
+ logger.warning(f"LM head tie diagnostics failed: {e}")
333
+
334
+ # Hard-tie LM head
335
+ try:
336
+ emb_module = model.get_input_embeddings()
337
+ head_module = model.get_output_embeddings()
338
+ if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
339
+ if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
340
+ with torch.no_grad():
341
+ head_module.weight = emb_module.weight
342
+ logger.info("Force-tied LM head weight to input embeddings (pointer share).")
343
+ except Exception as e:
344
+ logger.warning(f"Force-tie of LM head failed: {e}")
345
+
346
+ # Validate special IDs (info logs only)
347
+ try:
348
+ special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
349
+ try:
350
+ vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
351
+ except Exception:
352
+ vocab_size = 0
353
+ in_emb_mod = model.get_input_embeddings()
354
+ out_emb_mod = model.get_output_embeddings()
355
+ in_w = getattr(in_emb_mod, "weight", None)
356
+ out_w = getattr(out_emb_mod, "weight", None)
357
+ for name in special_names:
358
+ val = getattr(tok, name, None)
359
+ exists = (val is not None)
360
+ in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
361
+ equal_row = None
362
+ if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
363
+ try:
364
+ equal_row = bool(torch.allclose(in_w[val], out_w[val]))
365
+ except Exception:
366
+ equal_row = False
367
+ decoded_str = None
368
+ if exists and isinstance(val, int):
369
+ try:
370
+ decoded_str = tok.decode([val])
371
+ except Exception:
372
+ try:
373
+ decoded_str = tok.convert_ids_to_tokens(val)
374
+ except Exception:
375
+ decoded_str = "<decode_failed>"
376
+ logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
377
+ except Exception as e:
378
+ logger.warning(f"Special token ID/row validation failed: {e}")
379
+
380
+ # Quick tokenizer diagnostics (optional)
381
+ try:
382
+ logger.info("=== TOKENIZER DIAGNOSTICS ===")
383
+ logger.info(f"Tokenizer class: {type(tok).__name__}")
384
+ logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
385
+ # tiny CE smoke test
386
+ with torch.no_grad():
387
+ simple_text = "The cat sat on the mat."
388
+ simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
389
+ simple_mask = torch.ones_like(simple_ids)
390
+ x = model.get_input_embeddings()(simple_ids)
391
+ outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
392
+ logits = model.lm_head(outputs.last_hidden_state)
393
+ shift_logits = logits[:, :-1, :].contiguous()
394
+ shift_labels = simple_ids[:, 1:].contiguous()
395
+ ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
396
+ logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
397
+ except Exception as e:
398
+ logger.warning(f"Tokenizer diagnostics failed: {e}")
399
+
400
+ # Disable cache during training
401
+ if hasattr(model.config, "use_cache") and training_args.do_train:
402
+ model.config.use_cache = False
403
+
404
+ # Freeze tokenizers
405
+ if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
406
+ for p in model.model.acoustic_tokenizer.parameters():
407
+ p.requires_grad = False
408
+ if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
409
+ for p in model.model.semantic_tokenizer.parameters():
410
+ p.requires_grad = False
411
+
412
+ # LoRA wrap LLM (optional)
413
+ lora_cfg = build_lora_config(model_args)
414
+ tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
415
+ skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
416
+ if not skip_lm_lora:
417
+ model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
418
+ else:
419
+ logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
420
+
421
+ try:
422
+ model.tie_weights()
423
+ except Exception:
424
+ pass
425
+
426
+ # Freeze all then enable trainable subsets
427
+ for _, p in model.named_parameters():
428
+ p.requires_grad = False
429
+
430
+ try:
431
+ for n, p in model.model.language_model.named_parameters():
432
+ if "lora_A" in n or "lora_B" in n:
433
+ p.requires_grad = True
434
+ except Exception:
435
+ logger.warning("Could not re-enable LoRA params on language_model.")
436
+
437
+ # Diffusion head LoRA wrapping (optional)
438
+ if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
439
+ class _HeadForwardShim(nn.Module):
440
+ def __init__(self, base: nn.Module): super().__init__(); self.base = base
441
+ def forward(self, *args, **kwargs):
442
+ if len(args) >= 3:
443
+ noisy_images, timesteps, condition = args[:3]
444
+ else:
445
+ noisy_images = kwargs.get("noisy_images")
446
+ timesteps = kwargs.get("timesteps")
447
+ condition = kwargs.get("condition")
448
+ return self.base(noisy_images, timesteps, condition)
449
+ try:
450
+ shim = _HeadForwardShim(model.model.prediction_head)
451
+ model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
452
+ for n, p in model.model.prediction_head.named_parameters():
453
+ if "lora_A" in n or "lora_B" in n:
454
+ p.requires_grad = True
455
+ except Exception as e:
456
+ logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
457
+
458
+ # Train full diffusion head (optional)
459
+ if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
460
+ for p in model.model.prediction_head.parameters():
461
+ p.requires_grad = True
462
+
463
+ # Freeze diffusion head layers (optional)
464
+ if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
465
+ head_params = list(model.model.prediction_head.named_parameters())
466
+ try:
467
+ indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
468
+ frozen_count = 0
469
+ for i, (name, param) in enumerate(head_params):
470
+ if i in indices_to_freeze:
471
+ param.requires_grad = False
472
+ frozen_count += 1
473
+ logger.info(f"Froze layer [{i}]: {name}")
474
+ logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
475
+ except Exception as e:
476
+ logger.error(f"Could not parse --layers_to_freeze: {e}")
477
+ raise
478
+
479
+ # Connectors
480
+ if getattr(model_args, "train_connectors", False):
481
+ if hasattr(model.model, "acoustic_connector"):
482
+ for p in model.model.acoustic_connector.parameters():
483
+ p.requires_grad = True
484
+ if hasattr(model.model, "semantic_connector"):
485
+ for p in model.model.semantic_connector.parameters():
486
+ p.requires_grad = True
487
+ else:
488
+ if hasattr(model.model, "acoustic_connector"):
489
+ for p in model.model.acoustic_connector.parameters():
490
+ p.requires_grad = False
491
+ if hasattr(model.model, "semantic_connector"):
492
+ for p in model.model.semantic_connector.parameters():
493
+ p.requires_grad = False
494
+
495
+ # Freeze embedding + head
496
+ try:
497
+ emb = model.get_input_embeddings()
498
+ if hasattr(emb, "weight"):
499
+ emb.weight.requires_grad_(False)
500
+ head = model.get_output_embeddings()
501
+ if head is not None and hasattr(head, "weight"):
502
+ head.weight.requires_grad_(False)
503
+ except Exception:
504
+ pass
505
+
506
+ # Diagnostics
507
+ def _sum_params(named_iter):
508
+ return sum(p.numel() for _, p in named_iter if p.requires_grad)
509
+ try:
510
+ lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
511
+ pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
512
+ ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
513
+ se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
514
+ total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
515
+ logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
516
+ logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
517
+ except Exception:
518
+ pass
519
+
520
+ # Preprocessed data classes
521
+ class PreprocessedBatchDataset:
522
+ def __init__(self, preprocessed_file: str):
523
+ self.data = torch.load(preprocessed_file, map_location='cpu')
524
+ logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")
525
+
526
+ def __len__(self):
527
+ return len(self.data)
528
+
529
+ def __getitem__(self, idx):
530
+ batch = self.data[idx]
531
+ result = {}
532
+ for k, v in batch.items():
533
+ if isinstance(v, torch.Tensor):
534
+ result[k] = v
535
+ else:
536
+ result[k] = v
537
+ return result
538
+
539
+ class PreprocessedBatchSubset:
540
+ def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
541
+ self.dataset = dataset
542
+ self.indices = indices
543
+
544
+ def __len__(self):
545
+ return len(self.indices)
546
+
547
+ def __getitem__(self, idx):
548
+ actual_idx = self.indices[idx]
549
+ return self.dataset[actual_idx]
550
+
551
+ class PreprocessedBatchCollator:
552
+ def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
553
+ if not batch:
554
+ return {}
555
+ result = {}
556
+ for key in batch[0].keys():
557
+ tensors = [b[key] for b in batch if b[key] is not None]
558
+ if tensors and isinstance(tensors[0], torch.Tensor):
559
+ result[key] = torch.cat(tensors, dim=0)
560
+ else:
561
+ result[key] = tensors[0] if tensors else None
562
+ return result
563
+
564
+ # Datasets
565
+ preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
566
+ preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
567
+
568
+ if os.path.exists(preprocessed_file):
569
+ logger.info(f"Loading preprocessed data from {preprocessed_file}")
570
+ preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
571
+
572
+ train_dataset = preprocessed_data
573
+ eval_dataset = None
574
+
575
+ if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
576
+ num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
577
+ num_train = len(preprocessed_data) - num_eval
578
+ indices = list(range(len(preprocessed_data)))
579
+ import random
580
+ random.Random(training_args.seed).shuffle(indices)
581
+ train_indices = indices[:num_train]
582
+ eval_indices = indices[num_train:]
583
+ train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
584
+ eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
585
+ else:
586
+ logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
587
+ verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
588
+ if data_args.train_jsonl is not None:
589
+ data_files: Dict[str, str] = {"train": data_args.train_jsonl}
590
+ if data_args.validation_jsonl is not None:
591
+ data_files["validation"] = data_args.validation_jsonl
592
+ raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
593
+ else:
594
+ if data_args.dataset_name is None:
595
+ raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
596
+ raw = load_dataset(
597
+ data_args.dataset_name,
598
+ data_args.dataset_config_name,
599
+ verification_mode=verification_mode,
600
+ cache_dir=model_args.cache_dir,
601
+ )
602
+ train_ds = raw[data_args.train_split_name]
603
+ eval_ds = None
604
+ if training_args.do_eval:
605
+ if data_args.eval_split_name and data_args.eval_split_name in raw:
606
+ eval_ds = raw[data_args.eval_split_name]
607
+ elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
608
+ split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
609
+ train_ds, eval_ds = split["train"], split["test"]
610
+
611
+ train_dataset = VibeVoiceDataset(
612
+ train_ds,
613
+ text_column=data_args.text_column_name,
614
+ audio_column=data_args.audio_column_name,
615
+ voice_prompts_column=data_args.voice_prompts_column_name,
616
+ )
617
+ eval_dataset = None
618
+ if eval_ds is not None:
619
+ eval_dataset = VibeVoiceDataset(
620
+ eval_ds,
621
+ text_column=data_args.text_column_name,
622
+ audio_column=data_args.audio_column_name,
623
+ voice_prompts_column=data_args.voice_prompts_column_name,
624
+ )
625
+
626
+ # Ratios/dims from processor+model
627
+ speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
628
+ semantic_dim = getattr(model.config, "semantic_vae_dim", None)
629
+ if semantic_dim is None:
630
+ try:
631
+ semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
632
+ except Exception:
633
+ semantic_dim = 128
634
+
635
+ compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
636
+
637
+ if os.path.exists(preprocessed_file):
638
+ data_collator = PreprocessedBatchCollator()
639
+ else:
640
+ data_collator = VibeVoiceCollator(
641
+ processor=processor,
642
+ max_length=data_args.max_length,
643
+ speech_compress_ratio=speech_compress_ratio,
644
+ semantic_vae_dim=semantic_dim,
645
+ compute_semantics=compute_semantics_flag,
646
+ debug_checks=False,
647
+ voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
648
+ )
649
+
650
+ class LoRADebugCallback(TrainerCallback):
651
+ def __init__(self, log_every_n_steps: int = 50):
652
+ self.log_every_n_steps = max(1, int(log_every_n_steps))
653
+ self.prev_param_norms: Dict[str, float] = {}
654
+ self.lora_param_names: List[str] = []
655
+
656
+ def on_train_begin(self, args, state, control, model=None, **kwargs):
657
+ try:
658
+ if model is None:
659
+ return
660
+ named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
661
+ self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
662
+ for n in self.lora_param_names:
663
+ p = named[n]
664
+ self.prev_param_norms[n] = float(p.data.norm().item())
665
+ total = len(self.lora_param_names)
666
+ req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
667
+ num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
668
+ num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
669
+ zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
670
+ logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
671
+ if total == 0:
672
+ logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
673
+ if req_grad != total:
674
+ logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
675
+ except Exception as e:
676
+ logger.warning(f"LoRA debug (on_train_begin) failed: {e}")
677
+
678
+ def on_step_end(self, args, state, control, model=None, **kwargs):
679
+ try:
680
+ if model is None or len(self.lora_param_names) == 0:
681
+ return
682
+ step = int(getattr(state, "global_step", 0) or 0)
683
+ if step % self.log_every_n_steps != 0 and step != 1:
684
+ return
685
+ named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
686
+ changed_A = 0
687
+ changed_B = 0
688
+ zero_B = 0
689
+ eps = 1e-12
690
+ for n in self.lora_param_names:
691
+ p = named.get(n, None)
692
+ if p is None:
693
+ continue
694
+ prev = self.prev_param_norms.get(n, 0.0)
695
+ curr = float(p.data.norm().item())
696
+ if "lora_A" in n and abs(curr - prev) > eps:
697
+ changed_A += 1
698
+ if "lora_B" in n:
699
+ if abs(curr - prev) > eps:
700
+ changed_B += 1
701
+ if curr == 0.0:
702
+ zero_B += 1
703
+ self.prev_param_norms[n] = curr
704
+ total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
705
+ total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
706
+ logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
707
+ except Exception as e:
708
+ logger.warning(f"LoRA debug (on_step_end) failed: {e}")
709
+
710
+ class VibeVoiceTrainer(Trainer):
711
+ def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
712
+ labels = inputs.get("input_ids")
713
+ attention_mask = inputs.get("attention_mask")
714
+ acoustic_input_mask = inputs.get("acoustic_input_mask")
715
+
716
+ # Ensure semantic tensors exist and have correct dtype/device
717
+ sem = inputs.get("speech_semantic_tensors", None)
718
+ try:
719
+ target_dtype = next(model.model.semantic_connector.parameters()).dtype
720
+ except Exception:
721
+ target_dtype = model.get_input_embeddings().weight.dtype
722
+
723
+ if sem is None:
724
+ sm = inputs.get("speech_masks")
725
+ if sm is not None:
726
+ zeros = torch.zeros(
727
+ sm.size(0), sm.size(1),
728
+ getattr(model.config, "semantic_vae_dim", 128),
729
+ dtype=target_dtype,
730
+ device=sm.device,
731
+ )
732
+ inputs["speech_semantic_tensors"] = zeros
733
+ else:
734
+ if isinstance(sem, torch.Tensor):
735
+ inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)
736
+
737
+ outputs = model(
738
+ input_ids=inputs.get("input_ids"),
739
+ attention_mask=attention_mask,
740
+ speech_tensors=inputs.get("speech_tensors"),
741
+ speech_masks=inputs.get("speech_masks"),
742
+ speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
743
+ acoustic_input_mask=acoustic_input_mask,
744
+ acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
745
+ speeches_loss_input=inputs.get("speeches_loss_input"),
746
+ ddpm_batch_mul=training_args.ddpm_batch_mul,
747
+ )
748
+
749
+ # Invariants: token/latent selection equality across views (warn, don't assert)
750
+ try:
751
+ al_mask = inputs.get("acoustic_loss_mask")
752
+ sp_masks = inputs.get("speech_masks")
753
+ sp_loss_sel = inputs.get("speeches_loss_input")
754
+ num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
755
+ num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
756
+ num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
757
+ num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
758
+ self.log({
759
+ "debug/num_tok_total": float(num_tok_total),
760
+ "debug/num_tok_loss": float(num_tok_loss),
761
+ "debug/num_lat_total": float(num_lat_total),
762
+ "debug/num_lat_loss": float(num_lat_loss),
763
+ })
764
+ if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
765
+ if num_tok_loss != num_lat_loss:
766
+ logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
767
+ except Exception:
768
+ pass
769
+
770
+ # CE Loss
771
+ logits = outputs.logits
772
+ ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
773
+ shift_logits = logits[:, :-1, :].contiguous()
774
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
775
+ ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))
776
+
777
+ # Optional CE diagnostics
778
+ try:
779
+ self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
780
+ except Exception as e:
781
+ logger.warning(f"Failed invoking CE debug: {e}")
782
+
783
+ # Diffusion loss
784
+ diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
785
+ total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss
786
+
787
+ # Logs
788
+ try:
789
+ prefix = "train" if model.training else "eval"
790
+ self.log({
791
+ f"{prefix}/ce_loss": ce_loss.detach().item(),
792
+ f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
793
+ })
794
+ if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
795
+ lr_val = self.optimizer.param_groups[0].get("lr", None)
796
+ if lr_val is not None:
797
+ self.log({"train/learning_rate_real": float(lr_val)})
798
+ except Exception:
799
+ pass
800
+
801
+ return (total, outputs) if return_outputs else total
802
+
803
+ def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
804
+ try:
805
+ if not getattr(training_args, "debug_ce_details", False):
806
+ return
807
+ step = int(getattr(self.state, "global_step", 0) or 0)
808
+ every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
809
+ if not (step <= 1 or (step % every_n == 0)):
810
+ return
811
+
812
+ with torch.no_grad():
813
+ vocab = shift_logits.size(-1)
814
+ per_token_loss = F.cross_entropy(
815
+ shift_logits.view(-1, vocab),
816
+ ce_labels.view(-1),
817
+ reduction="none",
818
+ ignore_index=-100,
819
+ ).view_as(ce_labels)
820
+
821
+ valid_mask = ce_labels.ne(-100)
822
+ num_valid = int(valid_mask.sum().item())
823
+ avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")
824
+
825
+ per_ex_avgs = []
826
+ max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
827
+ B = ce_labels.size(0)
828
+ for b in range(min(B, max_examples)):
829
+ vb = valid_mask[b]
830
+ if int(vb.sum().item()) > 0:
831
+ per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
832
+ else:
833
+ per_ex_avgs.append(float("nan"))
834
+ logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
835
+ except Exception as e:
836
+ logger.warning(f"CE detailed debug failed: {e}")
837
+
838
+ # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------
839
+
840
+
841
+ def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
842
+ try:
843
+ target_dir = output_dir or self.args.output_dir
844
+ lora_out = os.path.join(target_dir, "lora")
845
+ os.makedirs(lora_out, exist_ok=True)
846
+
847
+ # --- LLM PEFT adapters (if LoRA-wrapped) ---
848
+ language_model = getattr(self.model.model, "language_model", None)
849
+ if hasattr(language_model, "save_pretrained"):
850
+ language_model.save_pretrained(lora_out)
851
+
852
+ # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
853
+ pred_head = getattr(self.model.model, "prediction_head", None)
854
+ if hasattr(pred_head, "save_pretrained"):
855
+ ph_dir = os.path.join(lora_out, "diffusion_head")
856
+ os.makedirs(ph_dir, exist_ok=True)
857
+ pred_head.save_pretrained(ph_dir)
858
+
859
+ # --- ALWAYS save FULL diffusion head state_dict for fallback ---
860
+ if pred_head is not None and hasattr(pred_head, "state_dict"):
861
+ sd = pred_head.state_dict()
862
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
863
+ ph_dir = os.path.join(lora_out, "diffusion_head")
864
+ os.makedirs(ph_dir, exist_ok=True)
865
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
866
+
867
+ # --- Connectors (plain state_dicts) ---
868
+ ac = getattr(self.model.model, "acoustic_connector", None)
869
+ if ac is not None:
870
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
871
+ os.makedirs(ac_dir, exist_ok=True)
872
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
873
+
874
+ se = getattr(self.model.model, "semantic_connector", None)
875
+ if se is not None:
876
+ se_dir = os.path.join(lora_out, "semantic_connector")
877
+ os.makedirs(se_dir, exist_ok=True)
878
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
879
+
880
+ except Exception as e:
881
+ logger.warning(f"Failed to save LoRA assets: {e}")
882
+
883
+
884
+ # ------------- Build the Trainer -------------
885
+
886
+ # Resolve which adapters to apply in samples
887
+
888
+ ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
889
+
890
+ # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
891
+ # This prevents 'ValueError: Attempting to unscale FP16 gradients'
892
+ if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
893
+ print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
894
+ for name, param in model.named_parameters():
895
+ if param.requires_grad:
896
+ param.data = param.data.to(torch.float32)
897
+ # ---------------------------------------------------
898
+
899
+ trainer = VibeVoiceTrainer(
900
+ model=model,
901
+ args=training_args,
902
+ train_dataset=train_dataset,
903
+ eval_dataset=eval_dataset,
904
+ data_collator=data_collator,
905
+ callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
906
+ )
907
+
908
+ # Optional debug pre-training save
909
+ if getattr(training_args, "debug_save", False):
910
+ try:
911
+ debug_dir = os.path.join(training_args.output_dir, "debug_initial")
912
+ lora_out = os.path.join(debug_dir, "lora")
913
+ os.makedirs(lora_out, exist_ok=True)
914
+ logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
915
+ # language model adapters / base
916
+ try:
917
+ if hasattr(model.model.language_model, "save_pretrained"):
918
+ model.model.language_model.save_pretrained(lora_out)
919
+ except Exception as e_lm:
920
+ logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
921
+ # diffusion head
922
+ try:
923
+ if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
924
+ model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
925
+ except Exception as e_head:
926
+ logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
927
+ # NEW: full diffusion head state_dict as fallback
928
+ try:
929
+ ph = getattr(model.model, "prediction_head", None)
930
+ if ph is not None and hasattr(ph, "state_dict"):
931
+ sd = ph.state_dict()
932
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
933
+ os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
934
+ torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
935
+ except Exception as e:
936
+ logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
937
+ # connectors
938
+ try:
939
+ ac_conn = getattr(model.model, "acoustic_connector", None)
940
+ if ac_conn is not None:
941
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
942
+ os.makedirs(ac_dir, exist_ok=True)
943
+ torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
944
+ except Exception as e_ac:
945
+ logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
946
+ try:
947
+ se_conn = getattr(model.model, "semantic_connector", None)
948
+ if se_conn is not None:
949
+ se_dir = os.path.join(lora_out, "semantic_connector")
950
+ os.makedirs(se_dir, exist_ok=True)
951
+ torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
952
+ except Exception as e_se:
953
+ logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
954
+ except Exception as e:
955
+ logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
956
+
957
+ if getattr(training_args, "gradient_checkpointing", False):
958
+ try:
959
+ model.gradient_checkpointing_enable()
960
+ except Exception:
961
+ logger.warning("Failed to enable gradient checkpointing on the model.")
962
+
963
+ # =========================================================================
964
+ # BUG FIX: Load Custom Weights from Checkpoint before resuming training
965
+ # =========================================================================
966
+ if training_args.do_train and training_args.resume_from_checkpoint:
967
+ checkpoint_path = None
968
+ if isinstance(training_args.resume_from_checkpoint, bool) and training_args.resume_from_checkpoint:
969
+ from transformers.trainer_utils import get_last_checkpoint
970
+ checkpoint_path = get_last_checkpoint(training_args.output_dir)
971
+ else:
972
+ checkpoint_path = training_args.resume_from_checkpoint
973
+
974
+ if checkpoint_path is not None and os.path.exists(checkpoint_path):
975
+ lora_dir = os.path.join(checkpoint_path, "lora")
976
+ if os.path.exists(lora_dir):
977
+ logger.info(f"*** Resuming custom weights (LoRA, Connectors, Head) from {lora_dir} ***")
978
+
979
+ # 1. Load LLM LoRA
980
+ if hasattr(model.model, "language_model"):
981
+ try:
982
+ from peft import load_peft_weights, set_peft_model_state_dict
983
+ adapters_weights = load_peft_weights(lora_dir)
984
+ set_peft_model_state_dict(model.model.language_model, adapters_weights)
985
+ logger.info("Successfully loaded LLM LoRA weights.")
986
+ except Exception as e:
987
+ logger.warning(f"Could not load LLM LoRA weights: {e}")
988
+
989
+ # 2. Load Diffusion Head
990
+ ph_full_path = os.path.join(lora_dir, "diffusion_head_full.bin")
991
+ if os.path.exists(ph_full_path) and hasattr(model.model, "prediction_head"):
992
+ try:
993
+ model.model.prediction_head.load_state_dict(torch.load(ph_full_path, map_location="cpu"), strict=False)
994
+ logger.info("Successfully loaded Diffusion Head weights.")
995
+ except Exception as e:
996
+ logger.warning(f"Failed to load Diffusion Head weights: {e}")
997
+
998
+ # 3. Load Acoustic Connector
999
+ ac_path = os.path.join(lora_dir, "acoustic_connector", "pytorch_model.bin")
1000
+ if os.path.exists(ac_path) and hasattr(model.model, "acoustic_connector"):
1001
+ try:
1002
+ model.model.acoustic_connector.load_state_dict(torch.load(ac_path, map_location="cpu"))
1003
+ logger.info("Successfully loaded Acoustic Connector weights.")
1004
+ except Exception as e:
1005
+ logger.warning(f"Failed to load Acoustic Connector weights: {e}")
1006
+
1007
+ # 4. Load Semantic Connector
1008
+ se_path = os.path.join(lora_dir, "semantic_connector", "pytorch_model.bin")
1009
+ if os.path.exists(se_path) and hasattr(model.model, "semantic_connector"):
1010
+ try:
1011
+ model.model.semantic_connector.load_state_dict(torch.load(se_path, map_location="cpu"))
1012
+ logger.info("Successfully loaded Semantic Connector weights.")
1013
+ except Exception as e:
1014
+ logger.warning(f"Failed to load Semantic Connector weights: {e}")
1015
+ else:
1016
+ logger.warning(f"No custom 'lora' directory found inside checkpoint: {checkpoint_path}")
1017
+ # =========================================================================
1018
+
1019
+ if training_args.do_train:
1020
+ trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
1021
+
1022
+ lora_out = os.path.join(training_args.output_dir, "lora")
1023
+ os.makedirs(lora_out, exist_ok=True)
1024
+
1025
+ # LLM PEFT (if any)
1026
+ lm = getattr(model.model, "language_model", None)
1027
+ if hasattr(lm, "save_pretrained"):
1028
+ lm.save_pretrained(lora_out)
1029
+
1030
+ # Diffusion head PEFT (if any)
1031
+ ph = getattr(model.model, "prediction_head", None)
1032
+ if hasattr(ph, "save_pretrained"):
1033
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1034
+ os.makedirs(ph_dir, exist_ok=True)
1035
+ ph.save_pretrained(ph_dir)
1036
+
1037
+ # ALWAYS: full diffusion head state_dict fallback
1038
+ try:
1039
+ if ph is not None and hasattr(ph, "state_dict"):
1040
+ sd = ph.state_dict()
1041
+ torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
1042
+ ph_dir = os.path.join(lora_out, "diffusion_head")
1043
+ os.makedirs(ph_dir, exist_ok=True)
1044
+ torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
1045
+ except Exception as e:
1046
+ logger.warning(f"Failed to save FULL diffusion head at end: {e}")
1047
+
1048
+ # Connectors (if trained)
1049
+ try:
1050
+ ac = getattr(model.model, "acoustic_connector", None)
1051
+ if ac is not None:
1052
+ ac_dir = os.path.join(lora_out, "acoustic_connector")
1053
+ os.makedirs(ac_dir, exist_ok=True)
1054
+ torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
1055
+ except Exception as e:
1056
+ logger.warning(f"Failed to save acoustic_connector: {e}")
1057
+
1058
+ try:
1059
+ se = getattr(model.model, "semantic_connector", None)
1060
+ if se is not None:
1061
+ se_dir = os.path.join(lora_out, "semantic_connector")
1062
+ os.makedirs(se_dir, exist_ok=True)
1063
+ torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
1064
+ except Exception as e:
1065
+ logger.warning(f"Failed to save semantic_connector: {e}")
1066
+
1067
+ if training_args.do_eval and eval_dataset is not None:
1068
+ trainer.evaluate()
1069
+
1070
+
1071
+ if __name__ == "__main__":
1072
+ main()
VibeVoice-finetuning/src/vibevoice/.DS_Store ADDED
Binary file (6.15 kB). View file
 
VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_1.5b_64k.json ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "acoustic_vae_dim": 64,
4
+ "acoustic_tokenizer_config": {
5
+ "causal": true,
6
+ "channels": 1,
7
+ "conv_bias": true,
8
+ "conv_norm": "none",
9
+ "corpus_normalize": 0.0,
10
+ "decoder_depths": null,
11
+ "decoder_n_filters": 32,
12
+ "decoder_ratios": [
13
+ 8,
14
+ 5,
15
+ 5,
16
+ 4,
17
+ 2,
18
+ 2
19
+ ],
20
+ "disable_last_norm": true,
21
+ "encoder_depths": "3-3-3-3-3-3-8",
22
+ "encoder_n_filters": 32,
23
+ "encoder_ratios": [
24
+ 8,
25
+ 5,
26
+ 5,
27
+ 4,
28
+ 2,
29
+ 2
30
+ ],
31
+ "fix_std": 0.5,
32
+ "layer_scale_init_value": 1e-06,
33
+ "layernorm": "RMSNorm",
34
+ "layernorm_elementwise_affine": true,
35
+ "layernorm_eps": 1e-05,
36
+ "mixer_layer": "depthwise_conv",
37
+ "model_type": "vibepod_acoustic_tokenizer",
38
+ "pad_mode": "constant",
39
+ "std_dist_type": "gaussian",
40
+ "vae_dim": 64,
41
+ "weight_init_value": 0.01
42
+ },
43
+ "decoder_config": {
44
+ "attention_dropout": 0.0,
45
+ "hidden_act": "silu",
46
+ "hidden_size": 1536,
47
+ "initializer_range": 0.02,
48
+ "intermediate_size": 8960,
49
+ "max_position_embeddings": 65536,
50
+ "max_window_layers": 28,
51
+ "model_type": "qwen2",
52
+ "num_attention_heads": 12,
53
+ "num_hidden_layers": 28,
54
+ "num_key_value_heads": 2,
55
+ "rms_norm_eps": 1e-06,
56
+ "rope_scaling": null,
57
+ "rope_theta": 1000000.0,
58
+ "sliding_window": null,
59
+ "tie_word_embeddings": true,
60
+ "torch_dtype": "bfloat16",
61
+ "use_cache": true,
62
+ "use_sliding_window": false,
63
+ "vocab_size": 151936
64
+ },
65
+ "diffusion_head_config": {
66
+ "ddpm_batch_mul": 4,
67
+ "ddpm_beta_schedule": "cosine",
68
+ "ddpm_num_inference_steps": 20,
69
+ "ddpm_num_steps": 1000,
70
+ "diffusion_type": "ddpm",
71
+ "head_ffn_ratio": 3.0,
72
+ "head_layers": 4,
73
+ "hidden_size": 1536,
74
+ "latent_size": 64,
75
+ "model_type": "vibepod_diffusion_head",
76
+ "prediction_type": "v_prediction",
77
+ "rms_norm_eps": 1e-05,
78
+ "speech_vae_dim": 64
79
+ },
80
+ "model_type": "vibepod",
81
+ "semantic_tokenizer_config": {
82
+ "causal": true,
83
+ "channels": 1,
84
+ "conv_bias": true,
85
+ "conv_norm": "none",
86
+ "corpus_normalize": 0.0,
87
+ "disable_last_norm": true,
88
+ "encoder_depths": "3-3-3-3-3-3-8",
89
+ "encoder_n_filters": 32,
90
+ "encoder_ratios": [
91
+ 8,
92
+ 5,
93
+ 5,
94
+ 4,
95
+ 2,
96
+ 2
97
+ ],
98
+ "fix_std": 0,
99
+ "layer_scale_init_value": 1e-06,
100
+ "layernorm": "RMSNorm",
101
+ "layernorm_elementwise_affine": true,
102
+ "layernorm_eps": 1e-05,
103
+ "mixer_layer": "depthwise_conv",
104
+ "model_type": "vibepod_semantic_tokenizer",
105
+ "pad_mode": "constant",
106
+ "std_dist_type": "none",
107
+ "vae_dim": 128,
108
+ "weight_init_value": 0.01
109
+ },
110
+ "semantic_vae_dim": 128,
111
+ "torch_dtype": "bfloat16"
112
+ }
VibeVoice-finetuning/src/vibevoice/configs/qwen2.5_7b_32k.json ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "acoustic_vae_dim": 64,
4
+ "acoustic_tokenizer_config": {
5
+ "causal": true,
6
+ "channels": 1,
7
+ "conv_bias": true,
8
+ "conv_norm": "none",
9
+ "corpus_normalize": 0.0,
10
+ "decoder_depths": null,
11
+ "decoder_n_filters": 32,
12
+ "decoder_ratios": [
13
+ 8,
14
+ 5,
15
+ 5,
16
+ 4,
17
+ 2,
18
+ 2
19
+ ],
20
+ "disable_last_norm": true,
21
+ "encoder_depths": "3-3-3-3-3-3-8",
22
+ "encoder_n_filters": 32,
23
+ "encoder_ratios": [
24
+ 8,
25
+ 5,
26
+ 5,
27
+ 4,
28
+ 2,
29
+ 2
30
+ ],
31
+ "fix_std": 0.5,
32
+ "layer_scale_init_value": 1e-06,
33
+ "layernorm": "RMSNorm",
34
+ "layernorm_elementwise_affine": true,
35
+ "layernorm_eps": 1e-05,
36
+ "mixer_layer": "depthwise_conv",
37
+ "model_type": "vibepod_acoustic_tokenizer",
38
+ "pad_mode": "constant",
39
+ "std_dist_type": "gaussian",
40
+ "vae_dim": 64,
41
+ "weight_init_value": 0.01
42
+ },
43
+ "decoder_config": {
44
+ "attention_dropout": 0.0,
45
+ "hidden_act": "silu",
46
+ "hidden_size": 3584,
47
+ "initializer_range": 0.02,
48
+ "intermediate_size": 18944,
49
+ "max_position_embeddings": 32768,
50
+ "max_window_layers": 28,
51
+ "model_type": "qwen2",
52
+ "num_attention_heads": 28,
53
+ "num_hidden_layers": 28,
54
+ "num_key_value_heads": 4,
55
+ "rms_norm_eps": 1e-06,
56
+ "rope_theta": 1000000.0,
57
+ "sliding_window": null,
58
+ "tie_word_embeddings": false,
59
+ "torch_dtype": "bfloat16",
60
+ "transformers_version": "4.40.1",
61
+ "use_cache": true,
62
+ "use_mrope": false,
63
+ "use_sliding_window": false,
64
+ "vocab_size": 152064
65
+ },
66
+ "diffusion_head_config": {
67
+ "ddpm_batch_mul": 4,
68
+ "ddpm_beta_schedule": "cosine",
69
+ "ddpm_num_inference_steps": 20,
70
+ "ddpm_num_steps": 1000,
71
+ "diffusion_type": "ddpm",
72
+ "head_ffn_ratio": 3.0,
73
+ "head_layers": 4,
74
+ "hidden_size": 3584,
75
+ "latent_size": 64,
76
+ "model_type": "vibepod_diffusion_head",
77
+ "prediction_type": "v_prediction",
78
+ "rms_norm_eps": 1e-05,
79
+ "speech_vae_dim": 64
80
+ },
81
+ "model_type": "vibepod",
82
+ "semantic_tokenizer_config": {
83
+ "causal": true,
84
+ "channels": 1,
85
+ "conv_bias": true,
86
+ "conv_norm": "none",
87
+ "corpus_normalize": 0.0,
88
+ "disable_last_norm": true,
89
+ "encoder_depths": "3-3-3-3-3-3-8",
90
+ "encoder_n_filters": 32,
91
+ "encoder_ratios": [
92
+ 8,
93
+ 5,
94
+ 5,
95
+ 4,
96
+ 2,
97
+ 2
98
+ ],
99
+ "fix_std": 0,
100
+ "layer_scale_init_value": 1e-06,
101
+ "layernorm": "RMSNorm",
102
+ "layernorm_elementwise_affine": true,
103
+ "layernorm_eps": 1e-05,
104
+ "mixer_layer": "depthwise_conv",
105
+ "model_type": "vibepod_semantic_tokenizer",
106
+ "pad_mode": "constant",
107
+ "std_dist_type": "none",
108
+ "vae_dim": 128,
109
+ "weight_init_value": 0.01
110
+ },
111
+ "semantic_vae_dim": 128,
112
+ "torch_dtype": "bfloat16"
113
+ }
VibeVoice-finetuning/src/vibevoice/modular/__init__.py ADDED
File without changes
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (175 Bytes). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (167 Bytes). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc ADDED
Binary file (9.25 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc ADDED
Binary file (8.32 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc ADDED
Binary file (29.3 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc ADDED
Binary file (27.4 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc ADDED
Binary file (15.8 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc ADDED
Binary file (14.5 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc ADDED
Binary file (8.29 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc ADDED
Binary file (7.66 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc ADDED
Binary file (65.4 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc ADDED
Binary file (60.9 kB). View file
 
VibeVoice-finetuning/src/vibevoice/modular/configuration_vibevoice.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ VibeVoice_AcousticTokenizer model configuration"""
2
+
3
+ from typing import Dict, List, Optional, Tuple
4
+
5
+ from transformers.configuration_utils import PretrainedConfig
6
+ from transformers.utils import logging
7
+
8
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ class VibeVoiceAcousticTokenizerConfig(PretrainedConfig):
14
+ model_type = "vibevoice_acoustic_tokenizer"
15
+
16
+ def __init__(
17
+ self,
18
+ channels: int = 1,
19
+ corpus_normalize: float = 0.0,
20
+ causal: bool = True,
21
+ vae_dim: int = 64,
22
+ fix_std: float = 0.5,
23
+ std_dist_type: str = 'gaussian',
24
+ # common
25
+ mixer_layer: str = 'depthwise_conv',
26
+ conv_norm: str = 'none',
27
+ pad_mode: str = 'constant',
28
+ disable_last_norm: bool = True,
29
+ layernorm: str = 'RMSNorm',
30
+ layernorm_eps: float = 1e-5,
31
+ layernorm_elementwise_affine: bool = True,
32
+ conv_bias: bool = True,
33
+ layer_scale_init_value: float = 1e-6,
34
+ weight_init_value: float = 1e-2,
35
+ # encoder specific
36
+ encoder_n_filters: int = 32,
37
+ encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
38
+ encoder_depths: str = "3-3-3-3-3-3-8",
39
+ # decoder specific
40
+ decoder_n_filters: int = 32,
41
+ decoder_ratios: Optional[List[int]] = None, # if None, same as encoder
42
+ decoder_depths: Optional[str] = None,
43
+ **kwargs
44
+ ):
45
+ super().__init__(**kwargs)
46
+ self.channels = channels
47
+ self.corpus_normalize = corpus_normalize
48
+ self.causal = causal
49
+ self.vae_dim = vae_dim
50
+ self.fix_std = fix_std
51
+ self.std_dist_type = std_dist_type
52
+
53
+ # common parameters
54
+ self.conv_norm = conv_norm
55
+ self.pad_mode = pad_mode
56
+ self.layernorm_eps = layernorm_eps
57
+ self.disable_last_norm = disable_last_norm
58
+ self.layernorm = layernorm
59
+ self.layernorm_elementwise_affine = layernorm_elementwise_affine
60
+ self.conv_bias = conv_bias
61
+ self.layer_scale_init_value = layer_scale_init_value
62
+ self.weight_init_value = weight_init_value
63
+ self.mixer_layer = mixer_layer
64
+
65
+ # encoder specific parameters
66
+ self.encoder_n_filters = encoder_n_filters
67
+ self.encoder_ratios = encoder_ratios
68
+ self.encoder_depths = encoder_depths
69
+
70
+ # decoder specific parameters
71
+ self.decoder_ratios = decoder_ratios if decoder_ratios is not None else encoder_ratios
72
+ self.decoder_n_filters = decoder_n_filters
73
+ self.decoder_depths = decoder_depths
74
+
75
+
76
+ class VibeVoiceSemanticTokenizerConfig(PretrainedConfig):
77
+ model_type = "vibevoice_semantic_tokenizer"
78
+
79
+ def __init__(
80
+ self,
81
+ channels: int = 1,
82
+ corpus_normalize: float = 0.0,
83
+ causal: bool = True,
84
+ vae_dim: int = 64,
85
+ fix_std: float = 0,
86
+ std_dist_type: str = 'none',
87
+ # common
88
+ mixer_layer: str = 'depthwise_conv',
89
+ conv_norm: str = 'none',
90
+ pad_mode: str = 'constant',
91
+ disable_last_norm: bool = True,
92
+ layernorm: str = 'RMSNorm',
93
+ layernorm_eps: float = 1e-5,
94
+ layernorm_elementwise_affine: bool = True,
95
+ conv_bias: bool = True,
96
+ layer_scale_init_value: float = 1e-6,
97
+ weight_init_value: float = 1e-2,
98
+ # encoder specific
99
+ encoder_n_filters: int = 32,
100
+ encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
101
+ encoder_depths: str = "3-3-3-3-3-3-8",
102
+ **kwargs
103
+ ):
104
+ super().__init__(**kwargs)
105
+ self.channels = channels
106
+ self.corpus_normalize = corpus_normalize
107
+ self.causal = causal
108
+ self.vae_dim = vae_dim
109
+ self.fix_std = fix_std
110
+ self.std_dist_type = std_dist_type
111
+
112
+ # common parameters
113
+ self.conv_norm = conv_norm
114
+ self.pad_mode = pad_mode
115
+ self.layernorm_eps = layernorm_eps
116
+ self.disable_last_norm = disable_last_norm
117
+ self.layernorm = layernorm
118
+ self.layernorm_elementwise_affine = layernorm_elementwise_affine
119
+ self.conv_bias = conv_bias
120
+ self.layer_scale_init_value = layer_scale_init_value
121
+ self.weight_init_value = weight_init_value
122
+ self.mixer_layer = mixer_layer
123
+
124
+ # encoder specific parameters
125
+ self.encoder_n_filters = encoder_n_filters
126
+ self.encoder_ratios = encoder_ratios
127
+ self.encoder_depths = encoder_depths
128
+
129
+
130
+ class VibeVoiceDiffusionHeadConfig(PretrainedConfig):
131
+ model_type = "vibevoice_diffusion_head"
132
+
133
+ def __init__(
134
+ self,
135
+ hidden_size=768,
136
+ head_layers=4,
137
+ head_ffn_ratio=3.0,
138
+ rms_norm_eps=1e-5,
139
+ latent_size=64,
140
+ speech_vae_dim=None,
141
+ prediction_type="v_prediction",
142
+ diffusion_type="ddpm",
143
+ ddpm_num_steps=1000,
144
+ ddpm_num_inference_steps=20,
145
+ ddpm_beta_schedule="cosine",
146
+ ddpm_batch_mul=4,
147
+ **kwargs
148
+ ):
149
+ self.hidden_size = hidden_size
150
+ self.head_layers = head_layers
151
+ self.head_ffn_ratio = head_ffn_ratio
152
+ self.rms_norm_eps = rms_norm_eps
153
+ self.latent_size = latent_size
154
+ self.speech_vae_dim = speech_vae_dim
155
+ self.prediction_type = prediction_type
156
+ self.diffusion_type = diffusion_type
157
+ self.ddpm_num_steps = ddpm_num_steps
158
+ self.ddpm_num_inference_steps = ddpm_num_inference_steps
159
+ self.ddpm_beta_schedule = ddpm_beta_schedule
160
+ self.ddpm_batch_mul = ddpm_batch_mul
161
+
162
+ super().__init__(**kwargs)
163
+
164
+ class VibeVoiceConfig(PretrainedConfig):
165
+ model_type = "vibevoice"
166
+ is_composition = True
167
+ sub_configs = {
168
+ "acoustic_tokenizer_config": VibeVoiceAcousticTokenizerConfig,
169
+ "semantic_tokenizer_config": VibeVoiceSemanticTokenizerConfig,
170
+ "decoder_config": Qwen2Config,
171
+ "diffusion_head_config": VibeVoiceDiffusionHeadConfig,
172
+ }
173
+ # keys_to_ignore_at_inference = ["past_key_values"]
174
+ # Default tensor parallel plan for base model `Qwen2`
175
+ base_model_tp_plan = {
176
+ "layers.*.self_attn.q_proj": "colwise",
177
+ "layers.*.self_attn.k_proj": "colwise",
178
+ "layers.*.self_attn.v_proj": "colwise",
179
+ "layers.*.self_attn.o_proj": "rowwise",
180
+ "layers.*.mlp.gate_proj": "colwise",
181
+ "layers.*.mlp.up_proj": "colwise",
182
+ "layers.*.mlp.down_proj": "rowwise",
183
+ }
184
+
185
+ def __init__(
186
+ self,
187
+ acoustic_tokenizer_config=None,
188
+ semantic_tokenizer_config=None,
189
+ decoder_config=None,
190
+ diffusion_head_config=None,
191
+ **kwargs
192
+ ):
193
+
194
+ # kwargs["_attn_implementation"] = "flash_attention_2"
195
+ kwargs["_attn_implementation_autoset"] = False
196
+
197
+ if acoustic_tokenizer_config is None:
198
+ self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"]()
199
+ elif isinstance(acoustic_tokenizer_config, dict):
200
+ acoustic_tokenizer_config["model_type"] = "vibevoice_acoustic_tokenizer"
201
+ self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"](**acoustic_tokenizer_config)
202
+ elif isinstance(acoustic_tokenizer_config, VibeVoiceAcousticTokenizerConfig):
203
+ # If an instance of the config class is provided
204
+ self.acoustic_tokenizer_config = acoustic_tokenizer_config
205
+
206
+ if semantic_tokenizer_config is None:
207
+ self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"]()
208
+ elif isinstance(semantic_tokenizer_config, dict):
209
+ semantic_tokenizer_config["model_type"] = "vibevoice_semantic_tokenizer"
210
+ self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"](**semantic_tokenizer_config)
211
+ elif isinstance(semantic_tokenizer_config, VibeVoiceSemanticTokenizerConfig):
212
+ # If an instance of the config class is provided
213
+ self.semantic_tokenizer_config = semantic_tokenizer_config
214
+
215
+ if decoder_config is None:
216
+ self.decoder_config = self.sub_configs["decoder_config"]()
217
+ elif isinstance(decoder_config, dict):
218
+ # If a dictionary is provided, instantiate the config class with it
219
+ # self.decoder_config = self.sub_configs["decoder_config"](**decoder_config)
220
+ if decoder_config.get("model_type", '') == "qwen2":
221
+ self.decoder_config = Qwen2Config(**decoder_config)
222
+ else:
223
+ raise ValueError(f"Unsupported decoder model type: {decoder_config.get('model_type', '')}")
224
+ elif isinstance(decoder_config, (Qwen2Config,)):
225
+ # If an instance of the config class is provided
226
+ self.decoder_config = decoder_config
227
+
228
+ if diffusion_head_config is None:
229
+ self.diffusion_head_config = self.sub_configs["diffusion_head_config"]()
230
+ elif isinstance(diffusion_head_config, dict):
231
+ diffusion_head_config["model_type"] = "vibevoice_diffusion_head"
232
+ self.diffusion_head_config = self.sub_configs["diffusion_head_config"](**diffusion_head_config)
233
+ elif isinstance(diffusion_head_config, VibeVoiceDiffusionHeadConfig):
234
+ # If an instance of the config class is provided
235
+ self.diffusion_head_config = diffusion_head_config
236
+
237
+ # other parameters
238
+ self.acoustic_vae_dim = getattr(self.acoustic_tokenizer_config, 'vae_dim', 64)
239
+ self.semantic_vae_dim = getattr(self.semantic_tokenizer_config, 'vae_dim', 128)
240
+
241
+ super().__init__(**kwargs)
242
+
243
+ __all__ = [
244
+ "VibeVoiceAcousticTokenizerConfig",
245
+ "VibeVoiceSemanticTokenizerConfig",
246
+ "VibeVoiceDiffusionHeadConfig",
247
+ "VibeVoiceConfig"
248
+ ]
VibeVoice-finetuning/src/vibevoice/modular/modeling_vibevoice.py ADDED
@@ -0,0 +1,508 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Dict, List, Optional, Tuple, Union, Callable
3
+ from tqdm import tqdm
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import torch.distributed as dist
8
+
9
+ from transformers.models.auto import AutoModel, AutoModelForCausalLM
10
+
11
+ from transformers.activations import ACT2FN
12
+ from transformers.modeling_outputs import CausalLMOutput, BaseModelOutputWithPast, ModelOutput
13
+ from transformers.models.llama.modeling_llama import LlamaRMSNorm
14
+ from transformers import modeling_utils
15
+ from transformers.modeling_utils import PreTrainedModel
16
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
17
+ from transformers.utils import logging
18
+
19
+
20
+ from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
21
+ from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
22
+ from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
23
+
24
+ from .configuration_vibevoice import VibeVoiceConfig
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
30
+ modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
31
+
32
+ @dataclass
33
+ class VibeVoiceCausalLMOutputWithPast(ModelOutput):
34
+ loss: Optional[torch.FloatTensor] = None
35
+ diffusion_loss: Optional[torch.FloatTensor] = None
36
+ speech_token_num: Optional[int] = None
37
+ logits: torch.FloatTensor = None
38
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
39
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
40
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
41
+
42
+
43
@dataclass
class VibeVoiceGenerationOutput(ModelOutput):
    """
    Output type for VibeVoice generation.

    Args:
        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            The generated sequences.
        speech_outputs (`List[torch.FloatTensor]`, *optional*):
            List of generated speech waveforms or latents for each speech segment.
    """
    sequences: torch.LongTensor = None
    speech_outputs: Optional[List[torch.FloatTensor]] = None
56
+
57
+
58
class SpeechConnector(nn.Module):
    """Projects speech-tokenizer latents into the language-model hidden space.

    A two-layer MLP with an RMSNorm between the layers:
    ``input_dim -> output_dim -> output_dim``.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Attribute names are part of the checkpoint format — keep them stable.
        self.fc1 = nn.Linear(input_dim, output_dim)
        self.norm = LlamaRMSNorm(output_dim, eps=1e-6)
        self.fc2 = nn.Linear(output_dim, output_dim)

    def forward(self, features, **kwargs):
        # linear -> RMSNorm -> linear; extra kwargs are accepted and ignored.
        return self.fc2(self.norm(self.fc1(features)))
70
+
71
+
72
# @auto_docstring
class VibeVoicePreTrainedModel(PreTrainedModel):
    """Base class shared by all VibeVoice models.

    Binds `VibeVoiceConfig` into the transformers `PreTrainedModel` machinery
    and declares capability flags (flash-attention/SDPA, cache classes,
    gradient checkpointing).
    """

    config_class = VibeVoiceConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize `module`'s weights.

        The diffusion head owns a bespoke initialization scheme, so it is
        delegated to; Linear/LayerNorm modules get the usual normal/constant
        init with the decoder's `initializer_range` when available.
        """
        # The diffusion head initializes itself; do not touch it further.
        if isinstance(module, VibeVoiceDiffusionHead):
            module.initialize_weights()
            return

        # Use the language model's initializer_range if available
        if hasattr(self.config, 'language_model_config') and hasattr(self.config.language_model_config, 'initializer_range'):
            std = self.config.language_model_config.initializer_range
        elif hasattr(self.config, 'decoder_config') and hasattr(self.config.decoder_config, 'initializer_range'):
            std = self.config.decoder_config.initializer_range
        else:
            std = 0.02  # Default value

        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
105
+
106
# @auto_docstring
class VibeVoiceModel(VibeVoicePreTrainedModel):
    """VibeVoice backbone.

    Composes:
      * a causal language model (``config.decoder_config``),
      * acoustic/semantic speech tokenizers,
      * `SpeechConnector`s projecting speech latents into LM hidden space,
      * a diffusion prediction head with a DPM-Solver noise scheduler.

    The ``speech_scaling_factor`` / ``speech_bias_factor`` buffers start as
    NaN, meaning "not yet estimated"; they are filled in lazily by
    `VibeVoiceForConditionalGeneration.forward_speech_features`.
    """

    def __init__(self, config):
        super().__init__(config)

        # Resolve the working dtype from the config; string names such as
        # "bfloat16" are accepted. Defaults to float32.
        if hasattr(config, 'torch_dtype') and config.torch_dtype is not None:
            if isinstance(config.torch_dtype, str):
                dtype = getattr(torch, config.torch_dtype)
            else:
                dtype = config.torch_dtype
        else:
            dtype = torch.float32

        # Initialize the decoder-only language model (e.g. Qwen2).
        lm_config = config.decoder_config
        self.language_model = AutoModel.from_config(lm_config)

        # Speech tokenizers: acoustic (waveform <-> latent) and semantic.
        self.acoustic_tokenizer = AutoModel.from_config(config.acoustic_tokenizer_config).to(dtype)
        self.semantic_tokenizer = AutoModel.from_config(config.semantic_tokenizer_config).to(dtype)

        # Projections from tokenizer latent space into LM hidden space.
        self.acoustic_connector = SpeechConnector(config.acoustic_vae_dim, lm_config.hidden_size).to(dtype)
        self.semantic_connector = SpeechConnector(config.semantic_vae_dim, lm_config.hidden_size).to(dtype)

        # Register scaling factors as buffers - scalar tensors (NaN = "unset")
        # kept as buffers for FSDP compatibility and checkpointing.
        self.register_buffer('speech_scaling_factor', torch.tensor(float('nan')))
        self.register_buffer('speech_bias_factor', torch.tensor(float('nan')))

        # Diffusion prediction head used for speech-latent generation.
        self.prediction_head = AutoModel.from_config(config.diffusion_head_config).to(dtype)

        # Noise scheduler driving diffusion training/inference.
        self.noise_scheduler = DPMSolverMultistepScheduler(
            num_train_timesteps=config.diffusion_head_config.ddpm_num_steps,
            beta_schedule=config.diffusion_head_config.ddpm_beta_schedule,
            prediction_type=config.diffusion_head_config.prediction_type
        )

    def get_input_embeddings(self):
        """Return the token-embedding module of the wrapped language model.

        Falls back to nnscaler's ``fullmap`` when the model has been
        transformed for parallelism and the attribute was renamed.

        Raises:
            RuntimeError: if no embedding module can be located.
        """
        if hasattr(self.language_model, 'embed_tokens'):
            # If the language model has an embed_tokens attribute, return it
            return self.language_model.embed_tokens

        for name, attr in self.language_model.fullmap.items():  # parallel by nnscaler, the name is changed
            if attr.orig_name == 'embed_tokens.weight':
                return getattr(self.language_model, name)
        # Was `assert False, 'should not arrive here'`: asserts are stripped
        # under `python -O`, which would make this method silently return
        # None. Raise explicitly instead.
        raise RuntimeError('could not locate input embeddings on the language model')

    def set_input_embeddings(self, value):
        """Replace the language model's token-embedding module."""
        self.language_model.embed_tokens = value

    def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
        """Set the speech tokenizers used for encoding and decoding speech."""
        self.acoustic_tokenizer = acoustic_tokenizer
        self.semantic_tokenizer = semantic_tokenizer

        # Tokenizers are frozen feature extractors here: keep them in eval mode.
        if self.acoustic_tokenizer is not None:
            self.acoustic_tokenizer.eval()

        if self.semantic_tokenizer is not None:
            self.semantic_tokenizer.eval()

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the wrapped language model and normalize its output type.

        All arguments are forwarded verbatim to ``self.language_model``;
        when ``return_dict`` is falsy the raw tuple is returned unchanged.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Forward through language model
        outputs = self.language_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        if not return_dict:
            return outputs

        return BaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
210
+
211
+
212
class VibeVoiceForConditionalGeneration(VibeVoicePreTrainedModel):
    """Training-time VibeVoice model: LM head plus speech diffusion loss.

    Wraps `VibeVoiceModel` with an untied (by default) `lm_head` and a
    `forward` that (a) splices speech-latent embeddings into the token
    embeddings and (b) computes the diffusion MSE loss on the speech latents.
    """

    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}

    def __init__(self, config):
        super().__init__(config)
        self.model = VibeVoiceModel(config)
        self.vocab_size = config.decoder_config.vocab_size
        self.lm_head = nn.Linear(config.decoder_config.hidden_size, self.vocab_size, bias=False)

        self.post_init()

    def get_input_embeddings(self):
        # Delegates to the backbone (handles nnscaler-renamed embeddings).
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.lm_head

    def set_decoder(self, decoder):
        # Swap out the wrapped language model.
        self.model.language_model = decoder

    def get_decoder(self):
        return self.model.language_model

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        Must be called BEFORE the model is wrapped by FSDP/accelerate —
        it performs a plain parameter-object assignment.
        """
        if getattr(self.config.decoder_config, 'tie_word_embeddings', False):
            # The standard PreTrainedModel method will handle the tying.
            # It typically does a simple parameter object assignment, which is
            # CORRECT to do BEFORE FSDP wraps the model.
            output_embeddings = self.get_output_embeddings()
            input_embeddings = self.get_input_embeddings()
            if hasattr(input_embeddings, 'weight'):
                output_embeddings.weight = input_embeddings.weight
            else:
                # get_input_embeddings may have returned a parameter directly
                # (nnscaler path) rather than an nn.Embedding module.
                output_embeddings.weight = input_embeddings

            # Keep the bias length in sync with the (possibly resized) vocab.
            if getattr(output_embeddings, "bias", None) is not None:
                output_embeddings.bias.data = nn.functional.pad(
                    output_embeddings.bias.data,
                    (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
                    "constant",
                    0,
                )
            print("✅ Tied input and output embeddings using standard assignment.")
        else:
            print("ℹ️ tie_word_embeddings is False, not tying weights.")

    # Also, ensure set_output_embeddings is safe, though your implementation looks okay.
    # The key is to avoid calling it after accelerator.prepare().
    def set_output_embeddings(self, new_embeddings):
        # Plain module replacement — do not call after accelerator.prepare().
        self.lm_head = new_embeddings

    def forward_speech_features(
        self,
        speech_tensors=None,
        speech_masks=None,
        speech_type="audio",
        return_unmask=False
    ):
        """Encode speech into (latent features, LM-space connector features).

        Args:
            speech_tensors: raw waveforms ("audio") or packed VAE means ("vae");
                when None, returns a single zero latent (keeps DDP graphs alive).
            speech_masks: boolean mask of valid latent frames.
            speech_type: "audio" (run acoustic tokenizer) or "vae"
                (Gaussian-perturb precomputed means).
            return_unmask: when True, return the full (unmasked) tensors.

        Also lazily estimates the global speech scaling/bias buffers the
        first time it sees real speech (all-reduced across ranks if DDP is up).
        """
        if speech_tensors is None:
            # Use config to get vae_dim instead of non-existent self.args
            vae_dim = self.config.acoustic_tokenizer_config.vae_dim
            audio_features = torch.zeros(1, 1, vae_dim).to(self.get_input_embeddings().weight)
            connect_features = self.model.acoustic_connector(audio_features)
            return audio_features, connect_features
        else:
            with torch.no_grad():
                if speech_type == "audio":
                    with torch.no_grad():
                        frames = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))[0][0]
                    audio_tokens = frames.sample(self.model.acoustic_tokenizer.std_dist_type)[0]

                elif speech_type == "vae":
                    # Use config to get vae_dim instead of non-existent self.args
                    vae_dim = self.config.acoustic_tokenizer_config.vae_dim
                    speech_mode = speech_tensors.reshape(speech_tensors.size(0), -1, vae_dim)

                    # gaussian sample from the speech_mode
                    batch_size = speech_mode.size(0)
                    value = self.model.acoustic_tokenizer.fix_std / 0.8
                    std = torch.randn(batch_size, dtype=speech_mode.dtype, device=speech_mode.device) * value
                    std = std.view(-1, *[1] * (speech_mode.dim() - 1))
                    audio_tokens = speech_mode + std * torch.randn(speech_mode.shape).to(speech_mode)
                else:
                    raise NotImplementedError(f"Speech type {speech_type} not implemented")

                # Lazily estimate normalization stats the first time real
                # speech is seen (buffers start as NaN).
                if torch.isnan(self.model.speech_scaling_factor) or torch.isnan(self.model.speech_bias_factor):
                    scaling_factor = 1. / audio_tokens[speech_masks].flatten().std()
                    bias_factor = -audio_tokens[speech_masks].flatten().mean()

                    # Only use distributed operations if the process group is initialized
                    if dist.is_available() and dist.is_initialized():
                        # NOTE(review): averaging per-rank std/mean via
                        # all_reduce is an approximation of the global stats.
                        dist.all_reduce(scaling_factor, op=dist.ReduceOp.SUM)
                        dist.all_reduce(bias_factor, op=dist.ReduceOp.SUM)
                        world_size = dist.get_world_size()
                        self.model.speech_scaling_factor.copy_(scaling_factor / world_size)
                        self.model.speech_bias_factor.copy_(bias_factor / world_size)
                        print(f"Speech scaling factor (distributed): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)
                    else:
                        # Single process case
                        self.model.speech_scaling_factor.copy_(scaling_factor)
                        self.model.speech_bias_factor.copy_(bias_factor)
                        print(f"Speech scaling factor (single process): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)

                audio_features = (audio_tokens + self.model.speech_bias_factor) * self.model.speech_scaling_factor

            # Connector runs OUTSIDE no_grad so it receives gradients.
            connect_features = self.model.acoustic_connector(audio_features)
            if return_unmask:
                return audio_features, connect_features
            return audio_features[speech_masks], connect_features[speech_masks]

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        # New arguments for speech processing and loss calculation
        speech_tensors: Optional[torch.FloatTensor] = None,
        speech_masks: Optional[torch.BoolTensor] = None,
        speeches_loss_input: Optional[torch.FloatTensor] = None,
        speech_semantic_tensors: Optional[torch.FloatTensor] = None,
        acoustic_input_mask: Optional[torch.BoolTensor] = None,
        acoustic_loss_mask: Optional[torch.BoolTensor] = None,
        ddpm_batch_mul: int = 1,
        **kwargs: Optional[Dict[str, Union[torch.Tensor, str]]],
    ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
        """Training forward pass.

        Splices acoustic (+ semantic) connector features into the token
        embeddings at `acoustic_input_mask` positions, runs the decoder,
        and computes a diffusion loss on the speech latents conditioned on
        the hidden state of the token PRECEDING each latent position.

        The CE loss on `labels` is intentionally left to the training script
        (`loss` is returned as None).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        x = self.get_input_embeddings()(input_ids)

        # NOTE(review): crashes if speech_semantic_tensors is None — callers
        # appear to always pass it; confirm against the training collator.
        semantic_speech_all_connect_features = self.model.semantic_connector(speech_semantic_tensors)
        if speeches_loss_input is not None:
            # only part audio need diffuse
            speech_all_features, speech_all_connect_features = self.forward_speech_features(
                speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
                speech_masks=speech_masks,
                speech_type=kwargs.get("speech_type", "audio"),
                return_unmask=True
            )
            if speech_tensors is not None:
                # Scatter speech embeddings into the placeholder positions.
                if semantic_speech_all_connect_features is not None:
                    x[acoustic_input_mask] = speech_all_connect_features[speech_masks] + semantic_speech_all_connect_features[speech_masks]
                else:
                    x[acoustic_input_mask] = speech_all_connect_features[speech_masks]
                speech_features = speech_all_features[speeches_loss_input & speech_masks]  # only part audio need diffuse
                speech_connect_features = speech_all_connect_features[speeches_loss_input & speech_masks]
                # Forward-time consistency check: selected latent count should match number of acoustic placeholders
                # NOTE(review): the bare `except Exception: pass` swallows the
                # AssertionError too, so this check can never fire.
                try:
                    if acoustic_input_mask is not None:
                        assert speech_connect_features.shape[0] == int(acoustic_input_mask.sum().item()), (
                            f"Mismatch between selected speech connectors ({speech_connect_features.shape[0]}) and acoustic_input_mask sum ({int(acoustic_input_mask.sum().item())})"
                        )
                except Exception:
                    pass
        else:
            speech_features, speech_connect_features = self.forward_speech_features(
                speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
                speech_masks=speech_masks,
                speech_type=kwargs.get("speech_type", "audio"),
            )
            if speech_tensors is not None:
                x[acoustic_input_mask] = speech_connect_features

        outputs = self.model(
            input_ids=None,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=x,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=False,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        logits = self.lm_head(hidden_states)
        # logits = logits.float()

        loss = None
        if labels is not None:
            # The custom CE loss with masking is calculated in the training script.
            # We leave the standard loss calculation here as None.
            pass

        # --- Diffusion Loss Calculation ---
        diffusion_loss = None
        # This block is executed only if we are in a context that involves speech.
        if speech_tensors is not None and acoustic_loss_mask.sum().item() > 0:
            # Build conditioning mask from positions whose NEXT token is a speech latent (shift left by 1)
            cond_mask = torch.zeros_like(acoustic_loss_mask, dtype=torch.bool)
            cond_mask[:, :-1] = acoustic_loss_mask[:, 1:]
            cond_mask[:, 0] = False
            condition_features = hidden_states[cond_mask]

            speech_len, latent_size = speech_features.shape
            # Sanity check: ensure 1:1 alignment between selected conditions and latents
            # NOTE(review): as above, the except clause neuters this assert.
            try:
                assert condition_features.shape[0] == speech_len, (
                    f"Mismatch: condition_features={condition_features.shape[0]} vs speech_features={speech_len}"
                )
            except Exception:
                pass

            noise = torch.randn(
                (speech_len * ddpm_batch_mul, latent_size),
                device=hidden_states.device,
                dtype=hidden_states.dtype
            )

            # Uniformly sample a timestep per (latent, ddpm_batch_mul) pair.
            timesteps = torch.multinomial(
                torch.ones(self.config.diffusion_head_config.ddpm_num_steps),
                speech_len * ddpm_batch_mul,
                replacement=True,
            ).to(hidden_states.device)

            speech_features_repeated = speech_features.repeat_interleave(ddpm_batch_mul, dim=0)
            condition_features_repeated = condition_features.repeat_interleave(ddpm_batch_mul, dim=0)

            noisy_speech_features = self.model.noise_scheduler.add_noise(
                speech_features_repeated, noise, timesteps
            )

            model_output = self.model.prediction_head(
                noisy_speech_features,
                timesteps.type_as(x),
                condition_features_repeated
            )

            # Target depends on the configured parameterization.
            prediction_type = self.config.diffusion_head_config.prediction_type
            if prediction_type == "epsilon":
                target_for_loss = noise
            elif prediction_type == "v_prediction":
                target_for_loss = self.model.noise_scheduler.get_velocity(
                    speech_features_repeated, noise, timesteps
                )
            else:
                raise NotImplementedError(f"Prediction type {prediction_type} not implemented")

            diffusion_loss = F.mse_loss(model_output.float(), target_for_loss.float(), reduction='sum')
            if latent_size > 0 and ddpm_batch_mul > 0:
                # Normalize by latent dim, number of sampled diffusion steps per latent, and number of speech tokens
                diffusion_loss = diffusion_loss / latent_size / ddpm_batch_mul / max(speech_len, 1)
            else:
                diffusion_loss = torch.tensor(0.0, device=diffusion_loss.device)

        else:
            # Dummy loss for DDP to work when there are no speech samples in a batch,
            # but we are in a speech context.
            diffusion_loss = sum(p.sum() for p in self.model.prediction_head.parameters()) * 0.0
            diffusion_loss += sum(p.sum() for p in self.model.acoustic_connector.parameters()) * 0.0
            diffusion_loss += sum(p.sum() for p in self.model.semantic_connector.parameters()) * 0.0
        # --- End Diffusion Loss Calculation ---

        # NOTE(review): `speech_len` is only bound inside the diffusion branch;
        # if speech_tensors is not None but acoustic_loss_mask.sum() == 0,
        # both return paths below raise NameError. TODO: initialize
        # speech_len = 0 before the branch.
        if not return_dict:
            output = (logits, speech_len) + outputs.to_tuple()[1:]
            return (loss, diffusion_loss) + output

        return VibeVoiceCausalLMOutputWithPast(
            loss=loss,
            diffusion_loss=diffusion_loss,
            speech_token_num=speech_len if speech_tensors is not None else 0,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
498
+
499
# Register the VibeVoice classes with the Auto* factories so
# AutoModel.from_config / AutoModelForCausalLM.from_config resolve
# VibeVoiceConfig to these implementations.
AutoModel.register(VibeVoiceConfig, VibeVoiceModel)
AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGeneration)

# Public API of this module.
__all__ = [
    "VibeVoiceModel",
    "VibeVoicePreTrainedModel",
    "VibeVoiceForConditionalGeneration",
    "VibeVoiceCausalLMOutputWithPast",
    "VibeVoiceGenerationOutput",
]
VibeVoice-finetuning/src/vibevoice/modular/modeling_vibevoice_inference.py ADDED
@@ -0,0 +1,715 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Dict, List, Optional, Tuple, Union, Callable
3
+ from tqdm import tqdm
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from transformers.models.auto import AutoModel, AutoModelForCausalLM
8
+
9
+ from transformers.generation import GenerationMixin, GenerationConfig, LogitsProcessor, LogitsProcessorList, StoppingCriteriaList
10
+ from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
11
+ from transformers import modeling_utils
12
+ from transformers.modeling_utils import PreTrainedModel
13
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
14
+ from transformers.utils import logging
15
+
16
+
17
+ # from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
18
+ from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceTokenizerEncoderOutput
19
+ from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
20
+ from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
21
+
22
+ from .configuration_vibevoice import VibeVoiceConfig
23
+
24
+ from .modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizer, VibeVoiceTextTokenizerFast
25
+
26
+ from .modeling_vibevoice import VibeVoiceModel, VibeVoicePreTrainedModel
27
+ from .streamer import AudioStreamer, AsyncAudioStreamer
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
# Compatibility shim (same as in modeling_vibevoice.py): some transformers
# versions leave modeling_utils.ALL_PARALLEL_STYLES undefined or None, which
# breaks `_tp_plan` validation. Provide the expected styles when missing.
if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
    modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
33
+
34
@dataclass
class VibeVoiceCausalLMOutputWithPast(BaseModelOutputWithPast):
    """Inference forward output: base decoder outputs plus LM-head logits."""
    # LM-head scores over the vocabulary (possibly sliced by logits_to_keep).
    logits: Optional[torch.FloatTensor] = None
37
+
38
@dataclass
class VibeVoiceGenerationOutput(ModelOutput):
    """
    Output type for VibeVoice generation.

    Args:
        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            The generated sequences.
        speech_outputs (`List[torch.FloatTensor]`, *optional*):
            List of generated speech waveforms or latents for each speech segment.
        reach_max_step_sample (`torch.BoolTensor`, *optional*):
            Per-sample flag marking sequences that hit the generation step limit.
    """
    sequences: torch.LongTensor = None
    speech_outputs: Optional[List[torch.FloatTensor]] = None
    reach_max_step_sample: Optional[torch.BoolTensor] = None
52
+
53
class VibeVoiceTokenConstraintProcessor(LogitsProcessor):
    """Constrains token generation to only valid tokens during speech generation."""

    def __init__(self, valid_token_ids: List[int], device: torch.device = None):
        # Keep the allow-list as a tensor so it can index score rows directly.
        self.valid_token_ids = torch.tensor(valid_token_ids, dtype=torch.long, device=device)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Additive bias: -inf everywhere except the allowed token columns,
        # which receive 0 and therefore keep their original scores.
        bias = torch.full_like(scores, float('-inf'))
        bias[:, self.valid_token_ids] = 0
        return scores + bias
67
+
68
+ class VibeVoiceForConditionalGenerationInference(VibeVoicePreTrainedModel, GenerationMixin):
69
+ _tied_weights_keys = ["lm_head.weight"]
70
+ _tp_plan = {"lm_head": "colwise_rep"}
71
+
72
    def __init__(self, config):
        """Build the inference model: backbone, LM head, and DDPM step count."""
        super().__init__(config)

        # Initialize the base model (LM + tokenizers + diffusion head).
        self.model = VibeVoiceModel(config)

        # LM head for text generation
        self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.decoder_config.vocab_size, bias=False)

        # Inference configuration: number of DDPM denoising steps.
        self.ddpm_inference_steps = config.diffusion_head_config.ddpm_num_inference_steps

        # Initialize weights and apply final processing
        self.post_init()
86
+
87
    # Convenience read-only aliases into the backbone's submodules/buffers,
    # so inference code can write `self.noise_scheduler` etc. directly.
    @property
    def noise_scheduler(self):
        return self.model.noise_scheduler

    @property
    def prediction_head(self):
        return self.model.prediction_head

    @property
    def speech_scaling_factor(self):
        return self.model.speech_scaling_factor

    @property
    def speech_bias_factor(self):
        return self.model.speech_bias_factor

    @property
    def acoustic_tokenizer(self):
        return self.model.acoustic_tokenizer

    @property
    def semantic_tokenizer(self):
        return self.model.semantic_tokenizer

    @property
    def acoustic_connector(self):
        return self.model.acoustic_connector

    @property
    def semantic_connector(self):
        return self.model.semantic_connector
118
+
119
    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        NOTE(review): this checks `self.config.tie_word_embeddings`, while the
        training variant checks `self.config.decoder_config.tie_word_embeddings`
        — confirm the two configs agree for tied checkpoints.
        """
        # Tie lm_head.weight to language_model.embed_tokens.weight
        if not getattr(self.config, 'tie_word_embeddings', False):
            return

        if hasattr(self, 'lm_head') and hasattr(self.model.language_model, 'embed_tokens'):
            self.lm_head.weight = self.model.language_model.embed_tokens.weight
129
+
130
    def get_input_embeddings(self):
        # Delegates to the backbone (handles nnscaler-renamed embeddings).
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
        """Set the speech tokenizers used for encoding and decoding speech."""
        self.model.set_speech_tokenizers(acoustic_tokenizer, semantic_tokenizer)

    def set_ddpm_inference_steps(self, num_steps=None):
        # None (or 0) falls back to the config default.
        self.ddpm_inference_steps = num_steps or self.config.diffusion_head_config.ddpm_num_inference_steps
148
+
149
+ def _process_speech_inputs(self, speech_tensors, speech_masks, speech_type="audio"):
150
+ """Process speech inputs through tokenizers and connectors."""
151
+ with torch.no_grad():
152
+ if speech_type == "audio":
153
+ # Encode audio to acoustic latents
154
+ encoder_output = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))
155
+ acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
156
+
157
+ # Apply scaling and bias
158
+ acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
159
+
160
+ # Connect to language model space
161
+ acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
162
+
163
+ return acoustic_features, acoustic_connected
164
+ elif speech_type == "pt":
165
+ encoder_output = VibeVoiceTokenizerEncoderOutput(mean=speech_tensors, std=self.acoustic_tokenizer.config.fix_std)
166
+ acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
167
+
168
+ # Apply scaling and bias
169
+ acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
170
+
171
+ # Connect to language model space
172
+ acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
173
+
174
+ return acoustic_features, acoustic_connected
175
+ else:
176
+ raise NotImplementedError(f"Speech type {speech_type} not implemented")
177
+
178
    # @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        speech_tensors: Optional[torch.FloatTensor] = None,
        speech_masks: Optional[torch.BoolTensor] = None,
        speech_input_mask: Optional[torch.BoolTensor] = None,
        logits_to_keep: Union[int, slice] = 0,
        **kwargs,
    ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
        """
        Inference forward: optionally splice voice-prompt speech embeddings
        into the token embeddings, run the decoder, and project to logits.

        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Not supported here; passing labels raises NotImplementedError.
            speech_tensors (`torch.FloatTensor`, *optional*):
                Input speech waveforms for voice cloning or speech understanding.
            speech_masks (`torch.BoolTensor`, *optional*):
                Masks indicating valid speech frames.
            speech_input_mask (`torch.BoolTensor`, *optional*):
                Positions in the input sequence where speech embeddings should be inserted.
            logits_to_keep (`int` or `slice`):
                When an int N, only the last N positions are projected through
                the LM head (0 keeps all); a slice is used directly.

        Returns:
            `VibeVoiceCausalLMOutputWithPast` or tuple
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Get embeddings
        if inputs_embeds is None:
            inputs_embeds = self.model.get_input_embeddings()(input_ids)

        # Process speech inputs if provided (in-place scatter into embeds).
        if speech_tensors is not None and speech_masks is not None:
            acoustic_features, speech_embeds = self._process_speech_inputs(speech_tensors.to(self.dtype), speech_masks)
            if speech_input_mask is not None:
                inputs_embeds[speech_input_mask] = speech_embeds

        outputs = self.model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0] if not return_dict else outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        if labels is not None:
            raise NotImplementedError("Loss computation is not implemented in this version.")

        return VibeVoiceCausalLMOutputWithPast(
            logits=logits,
            past_key_values=outputs.past_key_values,
            last_hidden_state=hidden_states,
            attentions=outputs.attentions,
        )
253
+
254
    def _build_generate_config_model_kwargs(self, generation_config, inputs, tokenizer, return_processors=False, **kwargs):
        """Assemble a `GenerationConfig` plus model kwargs for a custom generation loop.

        Mirrors the first half of `GenerationMixin.generate`: builds/normalizes the
        generation config, prepares model inputs, special tokens, lengths, and the KV
        cache, and optionally returns logits processors / stopping criteria.

        Args:
            generation_config: `None` or a mapping of generation options.
                NOTE(review): the non-None branch does `GenerationConfig(**generation_config)`,
                so this must be dict-like, not an already-built `GenerationConfig` — confirm callers.
            inputs: Input tensor forwarded to `_prepare_model_inputs` (may be None; then
                `input_ids` is expected in **kwargs).
            tokenizer: Tokenizer providing bos/eos/pad ids and the custom speech token ids.
            return_processors: When True, also build and return logits processors and
                stopping criteria.
            **kwargs: Extra generation kwargs (e.g. `max_new_tokens`, `input_ids`).

        Returns:
            `(generation_config, model_kwargs, input_ids)` — plus
            `(logits_processor, stopping_criteria)` when `return_processors` is True.
        """
        if generation_config is None:
            generation_config = GenerationConfig(
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id = tokenizer.pad_token_id
            )
        else:
            # Merge caller-provided options with the tokenizer's special ids.
            generation_config = GenerationConfig(
                **generation_config,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id = tokenizer.pad_token_id
            )

        # HF helper consumes known kwargs into the config and returns the rest as model kwargs.
        generation_config, model_kwargs = self._prepare_generation_config(
            generation_config,
            True,
            speech_start_id=tokenizer.speech_start_id,
            speech_end_id=tokenizer.speech_end_id,
            speech_diffusion_id=tokenizer.speech_diffusion_id,
            **kwargs
        )
        # Re-attach the speech ids explicitly: the generation loop reads them off the config.
        generation_config.speech_start_id = tokenizer.speech_start_id
        generation_config.speech_end_id = tokenizer.speech_end_id
        generation_config.speech_diffusion_id = tokenizer.speech_diffusion_id

        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs)
        batch_size = inputs_tensor.shape[0]
        device = self.device

        self._prepare_special_tokens(generation_config, True, device=device)
        # KV cache is mandatory for this streaming generation loop.
        generation_config.use_cache = True
        model_kwargs["use_cache"] = generation_config.use_cache
        input_ids = inputs_tensor.to(self.device)

        input_ids_length = input_ids.shape[1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        # Cache holds every position except the final generated token.
        max_cache_length = generation_config.max_length - 1
        self._prepare_cache_for_generation(generation_config, model_kwargs, None, batch_size, max_cache_length, device)
        model_kwargs['cache_position'] = torch.arange(input_ids_length, device=device, dtype=torch.long)
        # Ensure every tensor kwarg lives on the model device before the loop starts.
        for k, v in model_kwargs.items():
            if isinstance(v, torch.Tensor):
                model_kwargs[k] = v.to(device=device)

        if return_processors:
            logits_processor = self._get_logits_processor(
                generation_config=generation_config,
                input_ids_seq_length=input_ids_length,
                encoder_input_ids=inputs_tensor,
                prefix_allowed_tokens_fn=None,
                logits_processor=LogitsProcessorList(),
                device=inputs_tensor.device,
                model_kwargs=model_kwargs,
            )

            stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=StoppingCriteriaList())

            return generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria
        else:
            return generation_config, model_kwargs, input_ids
325
+
326
    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
        synced_gpus: Optional[bool] = None,
        assistant_model: Optional["PreTrainedModel"] = None,
        audio_streamer: Optional[Union[AudioStreamer, AsyncAudioStreamer]] = None,
        negative_prompt_ids: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        speech_tensors: Optional[torch.FloatTensor] = None,
        speech_masks: Optional[torch.BoolTensor] = None,
        speech_input_mask: Optional[torch.BoolTensor] = None,
        return_speech: bool = True,
        cfg_scale: float = 1.0,
        stop_check_fn: Optional[Callable[[], bool]] = None,
        **kwargs,
    ) -> Union[torch.LongTensor, VibeVoiceGenerationOutput]:
        """
        Generates sequences of token ids and optionally speech outputs.

        Runs a custom token-by-token loop: the LM emits only speech control tokens
        (start/end/diffusion/eos); for each "diffusion" token a CFG diffusion step
        produces an acoustic latent that is decoded to an audio chunk and fed back
        as the next input embedding. A parallel "negative" (unconditional) pass is
        maintained for classifier-free guidance.

        Args:
            All standard generation arguments from GenerationMixin
            negative_prompt_ids: Negative prompt for CFG in speech generation
            negative_prompt_attention_mask: Attention mask for negative prompt
            speech_tensors: Input speech for voice cloning
            speech_masks: Masks for speech tensors
            speech_input_mask: Positions to insert speech embeddings
            return_speech: Whether to decode and return speech outputs
            cfg_scale: CFG scale for speech generation
            stop_check_fn: Optional callable that returns True if generation should stop

        Returns:
            Generated token sequences and optionally speech outputs
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
        tokenizer = kwargs.pop("tokenizer", None)  # Pull this out first, we only use it for stopping criteria
        # Popped so they don't leak into model kwargs; not used in this method.
        parsed_scripts = kwargs.pop("parsed_scripts", None)
        all_speakers_list = kwargs.pop("all_speakers_list", None)
        max_length_times = kwargs.pop("max_length_times", 2)

        if kwargs.get('max_new_tokens', None) is None:
            # Default budget: fill the rest of the positional window.
            kwargs['max_new_tokens'] = self.config.decoder_config.max_position_embeddings - kwargs['input_ids'].shape[-1]

        generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria = self._build_generate_config_model_kwargs(
            generation_config, inputs, tokenizer, return_processors=True, **kwargs
        )

        # Negative (unconditional) stream for CFG starts from a single speech-start token.
        negative_kwargs = {
            'input_ids': torch.full((kwargs['input_ids'].shape[0], 1), tokenizer.speech_start_id, dtype=torch.long, device=kwargs['input_ids'].device),
            'attention_mask': torch.ones((kwargs['input_ids'].shape[0], 1), dtype=torch.long, device=kwargs['input_ids'].device),
            'max_new_tokens': kwargs.get('max_new_tokens', 100)
        }
        negative_generation_config, negative_model_kwargs, negative_input_ids = self._build_generate_config_model_kwargs(
            None, None, tokenizer, return_processors=False, **negative_kwargs
        )

        # Separate streaming caches: acoustic decode and semantic re-encode are independent.
        acoustic_cache = VibeVoiceTokenizerStreamingCache()
        semantic_cache = VibeVoiceTokenizerStreamingCache()

        batch_size = input_ids.shape[0]
        device = input_ids.device
        finished_tags = torch.zeros(batch_size, dtype=torch.bool, device=device)
        # Per-sample count of negative-cache corrections (see non-diffusion shift below).
        correct_cnt = torch.zeros(batch_size, dtype=torch.long, device=device)
        is_prefill = True
        inputs_embeds = None
        verbose = kwargs.get("verbose", False)

        # Initialize audio chunks storage for each sample
        audio_chunks = [[] for _ in range(batch_size)]

        initial_length = input_ids.shape[-1]
        initial_length_per_sample = model_kwargs['attention_mask'].sum(dim=-1)

        # Define all valid tokens that can be generated
        valid_tokens = [
            generation_config.speech_start_id,
            generation_config.speech_end_id,
            generation_config.speech_diffusion_id,
            generation_config.eos_token_id
        ]
        # Add bos_token_id if it exists
        if hasattr(generation_config, 'bos_token_id') and generation_config.bos_token_id is not None:
            valid_tokens.append(generation_config.bos_token_id)

        # Add custom processor to constrain token generation
        token_constraint_processor = VibeVoiceTokenConstraintProcessor(valid_tokens, device=device)
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(token_constraint_processor)

        # Step budget: hard max_length cap, but also proportional to prompt length.
        max_steps = min(generation_config.max_length - initial_length, int(max_length_times * initial_length))
        max_step_per_sample = torch.min(generation_config.max_length - initial_length_per_sample, (max_length_times * initial_length_per_sample).long())
        reach_max_step_sample = torch.zeros(batch_size, dtype=torch.bool, device=device)

        # Create progress iterator if verbose
        if kwargs.get("show_progress_bar", True):
            progress_bar = tqdm(range(max_steps), desc="Generating", leave=False)
        else:
            progress_bar = range(max_steps)

        for step in progress_bar:
            # Check for external stop signal
            if stop_check_fn is not None and stop_check_fn():
                if verbose:
                    print(f"Generation stopped externally at step {step + 1}")
                # End the audio streamer if it exists
                if audio_streamer is not None:
                    audio_streamer.end()
                break

            # Check if audio_streamer has been ended (stopped externally)
            if audio_streamer is not None and hasattr(audio_streamer, 'finished_flags'):
                if any(audio_streamer.finished_flags):
                    if verbose:
                        print(f"Audio generation stopped externally at step {step + 1}")
                    break

            if finished_tags.all():
                if hasattr(progress_bar, 'set_description'):
                    progress_bar.set_description("Generation complete")
                break

            if input_ids.shape[-1] >= generation_config.max_length:
                print(f"Reached maximum generation length {generation_config.max_length}, stopped it.")
                reached_samples = torch.arange(batch_size, device=device)[~finished_tags]
                if reached_samples.numel() > 0:
                    reach_max_step_sample[reached_samples] = True
                break

            # Update progress bar description with active samples
            if hasattr(progress_bar, 'set_description'):
                active_samples = (~finished_tags).sum().item()
                progress_bar.set_description(f"Generating (active: {active_samples}/{batch_size})")

            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            if is_prefill:
                # we process the speech inputs only during the first generation step
                prefill_inputs = {
                    "speech_tensors": speech_tensors.to(device=device),
                    "speech_masks": speech_masks.to(device),
                    "speech_input_mask": speech_input_mask.to(device),
                }
                is_prefill = False
            else:
                # After prefill, the single-token embedding computed at the end of
                # the previous iteration replaces whatever prepare_inputs built.
                _ = model_inputs.pop('inputs_embeds', None)
                prefill_inputs = {'inputs_embeds': inputs_embeds}

            # Forward pass through the model
            outputs = self(
                **model_inputs, **prefill_inputs, logits_to_keep=1, return_dict=True, output_attentions=False, output_hidden_states=False,
            )
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=False,
            )

            # Get logits and apply logits processor
            next_token_logits = outputs.logits[:, -1, :].to(copy=True, dtype=torch.float32, device=input_ids.device)
            # next_token_logits = outputs.logits[:, -1, :].to(copy=True, device=input_ids.device)
            next_token_scores = logits_processor(input_ids, next_token_logits)

            # token selection
            if generation_config.do_sample:
                probs = nn.functional.softmax(next_token_scores, dim=-1)
                # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(next_token_scores, dim=-1)

            # Finished samples keep emitting EOS so the batch stays aligned.
            next_tokens[finished_tags] = generation_config.eos_token_id
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)

            if not kwargs.get('refresh_negative', True):
                # Non-refresh mode: advance the negative stream every step.
                negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
                # Forward negative pass through the model
                if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
                    negative_model_inputs['inputs_embeds'] = inputs_embeds
                    negative_model_inputs['input_ids'] = None

                negative_outputs = self(
                    **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
                )
                negative_model_kwargs = self._update_model_kwargs_for_generation(
                    negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
                )
                negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)

            # reached end of generation
            if (next_tokens == generation_config.eos_token_id).any():
                eos_indices = (next_tokens == generation_config.eos_token_id).nonzero(as_tuple=False).squeeze(1)
                # Only print for samples that are newly finished (not already marked as finished)
                new_eos_indices = eos_indices[~finished_tags[eos_indices]]
                if new_eos_indices.numel() > 0:
                    finished_tags[new_eos_indices] = True
                    if verbose:
                        print(f"Samples {new_eos_indices.tolist()} reached EOS token at step {step + 1}.", flush=True)
                    if audio_streamer is not None:
                        audio_streamer.end(new_eos_indices)

            # Check if any sample reached its maximum generation length
            max_length_reached = step >= max_step_per_sample
            new_max_length_indices = torch.nonzero(max_length_reached & ~finished_tags, as_tuple=False).squeeze(1)
            if new_max_length_indices.numel() > 0:
                finished_tags[new_max_length_indices] = True
                reach_max_step_sample[new_max_length_indices] = True
                if verbose:
                    print(f"Samples {new_max_length_indices.tolist()} reached max generation length at step {step + 1}.", flush=True)
                if audio_streamer is not None:
                    audio_streamer.end(new_max_length_indices)

            # speech_end
            diffusion_end_indices = (next_tokens == generation_config.speech_end_id).nonzero(as_tuple=False).squeeze(1)
            if diffusion_end_indices.numel() > 0:
                # Clear tokenizer caches for samples that reached speech end
                acoustic_cache.set_to_zero(diffusion_end_indices)
                semantic_cache.set_to_zero(diffusion_end_indices)

            # speech_begin
            diffusion_start_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_start_id)]
            if diffusion_start_indices.numel() > 0 and kwargs.get('refresh_negative', True):
                # Reset the negative stream for samples entering a speech segment:
                # mask out all history except the last position, and copy position 0's
                # KV entry (the original speech-start token) into the last slot.
                # update attention mask
                for i, sample_idx in enumerate(diffusion_start_indices.tolist()):
                    negative_model_kwargs['attention_mask'][sample_idx, :] = 0
                    negative_model_kwargs['attention_mask'][sample_idx, -1] = 1
                # update past key values
                for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
                                                                   negative_model_kwargs['past_key_values'].value_cache)):
                    # Process each non-diffusion sample
                    for sample_idx in diffusion_start_indices.tolist():
                        # Shift cache for this sample
                        k_cache[sample_idx, :, -1, :] = k_cache[sample_idx, :, 0, :].clone()
                        v_cache[sample_idx, :, -1, :] = v_cache[sample_idx, :, 0, :].clone()
                # update negative_input_ids
                for sample_idx in diffusion_start_indices.tolist():
                    negative_input_ids[sample_idx, -1] = generation_config.speech_start_id

            # Prepare inputs_embeds for next iteration
            # Initialize with default embeddings for all tokens
            next_inputs_embeds = self.model.get_input_embeddings()(next_tokens).unsqueeze(1)  # [batch_size, 1, hidden_size]

            # forward diffusion
            # Diffusion indices are those that are not finished and not special tokens
            diffusion_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_diffusion_id)]

            if diffusion_indices.numel() > 0:
                if kwargs.get('refresh_negative', True):
                    negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
                    # Forward negative pass through the model
                    if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
                        negative_model_inputs['inputs_embeds'] = inputs_embeds
                        negative_model_inputs['input_ids'] = None

                    negative_outputs = self(
                        **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
                    )
                    negative_model_kwargs = self._update_model_kwargs_for_generation(
                        negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
                    )
                    negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)
                    # correct the non-diffusion indices
                    # we forward all samples' negative outputs even if
                    # they are not in diffusion mode to keep the cache consistent
                    # So we need to correct the kv cache of non-diffusion samples
                    non_diffusion_mask = ~finished_tags & (next_tokens != generation_config.speech_diffusion_id)
                    if non_diffusion_mask.any():
                        non_diffusion_indices = torch.arange(batch_size, device=device)[non_diffusion_mask]
                        start_indices = correct_cnt[non_diffusion_indices]

                        # 1. Update attention_mask - need to handle each sample separately
                        seq_len = negative_model_kwargs['attention_mask'].shape[1]
                        for i, (sample_idx, start_idx) in enumerate(zip(non_diffusion_indices.tolist(), start_indices.tolist())):
                            # Shift the attention mask for this sample
                            if start_idx + 1 < seq_len - 1:
                                negative_model_kwargs['attention_mask'][sample_idx, start_idx+1:] = \
                                    negative_model_kwargs['attention_mask'][sample_idx, start_idx:-1].clone()
                                negative_model_kwargs['attention_mask'][sample_idx, start_idx] = 0

                        # 2. Update past_key_values
                        for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
                                                                           negative_model_kwargs['past_key_values'].value_cache)):
                            # Process each non-diffusion sample
                            for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
                                if start_idx + 1 < k_cache.shape[2] - 1:
                                    # Shift cache for this sample
                                    k_cache[sample_idx, :, start_idx+1:, :] = k_cache[sample_idx, :, start_idx:-1, :].clone()
                                    v_cache[sample_idx, :, start_idx+1:, :] = v_cache[sample_idx, :, start_idx:-1, :].clone()

                        # 3. Update negative_input_ids
                        for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
                            if start_idx + 1 < negative_input_ids.shape[1] - 1:
                                negative_input_ids[sample_idx, start_idx+1:] = \
                                    negative_input_ids[sample_idx, start_idx:-1].clone()

                        correct_cnt[non_diffusion_indices] += 1

                # CFG conditions: last hidden state of positive and negative streams.
                positive_condition = outputs.last_hidden_state[diffusion_indices, -1, :]
                negative_condition = negative_outputs.last_hidden_state[diffusion_indices, -1, :]

                speech_latent = self.sample_speech_tokens(
                    positive_condition,
                    negative_condition,
                    cfg_scale=cfg_scale,
                ).unsqueeze(1)

                # Decode acoustic latent to audio using acoustic streaming cache
                scaled_latent = speech_latent / self.model.speech_scaling_factor.to(speech_latent.device) - self.model.speech_bias_factor.to(speech_latent.device)
                audio_chunk = self.model.acoustic_tokenizer.decode(
                    scaled_latent.to(self.model.acoustic_tokenizer.device),
                    cache=acoustic_cache,  # Use acoustic-specific cache
                    sample_indices=diffusion_indices.to(self.model.acoustic_tokenizer.device),
                    use_cache=True,
                    debug=False
                )

                # Store audio chunks for each sample
                for i, sample_idx in enumerate(diffusion_indices):
                    idx = sample_idx.item()
                    # Only append audio chunk if the sample is not finished
                    if not finished_tags[idx]:
                        audio_chunks[idx].append(audio_chunk[i])

                # Add streaming support here
                if audio_streamer is not None:
                    # Stream the audio chunks immediately
                    audio_streamer.put(audio_chunk, diffusion_indices)

                # Encode audio to semantic features using semantic streaming cache
                semantic_features = self.model.semantic_tokenizer.encode(
                    audio_chunk,
                    cache=semantic_cache,  # Use semantic-specific cache
                    sample_indices=diffusion_indices,
                    use_cache=True,
                    debug=False
                ).mean  # semantic tokenizer has no VAE.

                # Combine acoustic and semantic features for next input
                acoustic_embed = self.model.acoustic_connector(speech_latent)
                semantic_embed = self.model.semantic_connector(semantic_features)
                diffusion_embeds = acoustic_embed + semantic_embed

                # Update embeddings for diffusion indices
                next_inputs_embeds[diffusion_indices] = diffusion_embeds

            # Set inputs_embeds for next iteration
            inputs_embeds = next_inputs_embeds

        if audio_streamer is not None:
            audio_streamer.end()

        # Concatenate audio chunks for each sample
        final_audio_outputs = []
        for sample_chunks in audio_chunks:
            if sample_chunks:
                # Concatenate all chunks along the time dimension (assumed to be the last dimension)
                concatenated_audio = torch.cat(sample_chunks, dim=-1)
                final_audio_outputs.append(concatenated_audio)
            else:
                # If no audio was generated for this sample, append None
                final_audio_outputs.append(None)

        return VibeVoiceGenerationOutput(
            sequences=input_ids,
            speech_outputs=final_audio_outputs if return_speech else None,
            reach_max_step_sample=reach_max_step_sample,
        )
694
+
695
+ @torch.no_grad()
696
+ def sample_speech_tokens(self, condition, neg_condition, cfg_scale=3.0):
697
+ self.model.noise_scheduler.set_timesteps(self.ddpm_inference_steps)
698
+ condition = torch.cat([condition, neg_condition], dim=0).to(self.model.prediction_head.device)
699
+ speech = torch.randn(condition.shape[0], self.config.acoustic_vae_dim).to(condition)
700
+ for t in self.model.noise_scheduler.timesteps:
701
+ half = speech[: len(speech) // 2]
702
+ combined = torch.cat([half, half], dim=0)
703
+ eps = self.model.prediction_head(combined, t.repeat(combined.shape[0]).to(combined), condition=condition)
704
+ cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
705
+ half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
706
+ eps = torch.cat([half_eps, half_eps], dim=0)
707
+ speech = self.model.noise_scheduler.step(eps, t, speech).prev_sample
708
+ return speech[: len(speech) // 2]
709
+
710
+
711
# Register so AutoModelForCausalLM.from_pretrained resolves VibeVoiceConfig
# to the inference model class.
AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGenerationInference)

__all__ = [
    "VibeVoiceForConditionalGenerationInference",
]
VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_diffusion_head.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
+ from transformers.models.auto import AutoModel
9
+ from transformers.modeling_utils import PreTrainedModel
10
+ # from transformers.modeling_layers import GradientCheckpointingLayer
11
+ from transformers.activations import ACT2FN
12
+ from transformers.utils import logging
13
+
14
+ from .configuration_vibevoice import VibeVoiceDiffusionHeadConfig
15
+
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+
20
class RMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean subtraction), with optional learned gain.

    Args:
        dim: Size of the normalized (last) dimension.
        eps: Small constant added to the mean-square for numerical stability.
        elementwise_affine: When True, apply a learned per-channel scale.
        memory_efficient: Unused; kept for signature compatibility.
    """

    def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine=True, memory_efficient=False):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim))
        else:
            # Register as None so `self.weight` exists either way.
            self.register_parameter('weight', None)

    def _norm(self, x):
        # x / rms(x), computed as x * 1/sqrt(mean(x^2) + eps).
        mean_sq = x.square().mean(dim=-1, keepdim=True)
        return x * torch.rsqrt(mean_sq + self.eps)

    def forward(self, x):
        # Normalize in float32 for stability, then cast back to the input dtype.
        normalized = self._norm(x.float()).type_as(x)
        return normalized if self.weight is None else normalized * self.weight

    def extra_repr(self) -> str:
        return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'
42
+
43
def modulate(x, shift, scale):
    """Apply adaLN-style affine modulation: ``x * (1 + scale) + shift``."""
    return x * (scale + 1) + shift
46
+
47
+
48
class TimestepEmbedder(nn.Module):
    """Embed scalar diffusion timesteps via sinusoidal features and an MLP.

    Args:
        hidden_size (`int`): Size of the output embedding.
        frequency_embedding_size (`int`, optional): Width of the intermediate
            sinusoidal feature vector.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.frequency_embedding_size = frequency_embedding_size
        # NOTE: callers index mlp[0] and mlp[2] for weight init, so the two
        # Linear layers must stay at those positions.
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=False),
            ACT2FN['silu'],
            nn.Linear(hidden_size, hidden_size, bias=False),
        )

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t (`torch.Tensor`): A 1-D tensor of N indices, one per batch element
                (may be fractional).
            dim (`int`): Output embedding dimension.
            max_period (`int`, optional): Controls the minimum frequency.

        Returns:
            `torch.Tensor`: An [N, dim] tensor of positional embeddings
            (cos half followed by sin half, zero-padded when `dim` is odd).
        """
        half = dim // 2
        # Geometric frequency ladder from 1 down to 1/max_period.
        exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
        freqs = torch.exp(-math.log(max_period) * exponents).to(t.device)
        angles = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
        if dim % 2:
            # Pad one zero column so the width matches an odd `dim`.
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding.to(t.dtype)

    def forward(self, t):
        freq_feats = self.timestep_embedding(t, self.frequency_embedding_size)
        return self.mlp(freq_feats)
94
+
95
+
96
class FeedForwardNetwork(nn.Module):
    """SwiGLU feed-forward block: ``down( silu(gate(x)) * up(x) )``.

    Args:
        embed_dim (`int`): Input/output width.
        ffn_dim (`int`): Hidden width.
    """

    def __init__(
        self,
        embed_dim,
        ffn_dim,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.gate_proj = nn.Linear(embed_dim, ffn_dim, bias=False)
        self.up_proj = nn.Linear(embed_dim, ffn_dim, bias=False)
        self.down_proj = nn.Linear(ffn_dim, embed_dim, bias=False)
        # SiLU gate for the SwiGLU activation.
        self.act_fn = ACT2FN['silu']

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x))
        return self.down_proj(gated * self.up_proj(x))
124
+
125
+
126
class HeadLayer(nn.Module):
    """One adaLN-modulated FFN residual block of the diffusion head.

    The condition vector produces shift/scale/gate terms that modulate the
    normalized input before the FFN and gate its residual contribution.

    Args:
        embed_dim (`int`): Input dimension.
        ffn_dim (`int`): FFN hidden dimension.
        cond_dim (`int`): Condition embedding dimension.
        norm_eps (`float`, optional): Epsilon for RMS normalization.
    """

    def __init__(
        self,
        embed_dim,
        ffn_dim,
        cond_dim,
        norm_eps=1e-5,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.cond_dim = cond_dim
        self.ffn_dim = ffn_dim
        self.ffn = FeedForwardNetwork(embed_dim, ffn_dim)
        self.norm = RMSNorm(embed_dim, eps=norm_eps)
        # NOTE: the Linear must remain the last element — external init code
        # zeroes `adaLN_modulation[-1].weight`.
        self.adaLN_modulation = nn.Sequential(
            ACT2FN['silu'],
            nn.Linear(cond_dim, 3 * self.embed_dim, bias=False)
        )

    def forward(self, x, c):
        shift_ffn, scale_ffn, gate_ffn = self.adaLN_modulation(c).chunk(3, dim=-1)
        residual = self.ffn(modulate(self.norm(x), shift_ffn, scale_ffn))
        return x + gate_ffn * residual
162
+
163
+
164
class FinalLayer(nn.Module):
    """Output layer of the diffusion head: adaLN modulation + linear projection.

    Args:
        hidden_size (`int`): Input dimension.
        output_size (`int`): Output dimension.
        cond_size (`int`): Condition embedding dimension.
        norm_eps (`float`, optional): Epsilon for RMS normalization.
    """

    def __init__(self, hidden_size, output_size, cond_size, norm_eps=1e-5):
        super().__init__()
        # No learned gain here: scale comes entirely from the adaLN modulation.
        self.norm_final = RMSNorm(hidden_size, eps=norm_eps, elementwise_affine=False)
        self.linear = nn.Linear(hidden_size, output_size, bias=False)
        # NOTE: the Linear must remain the last element — external init code
        # zeroes `adaLN_modulation[-1].weight`.
        self.adaLN_modulation = nn.Sequential(
            ACT2FN['silu'],
            nn.Linear(cond_size, 2 * hidden_size, bias=False)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
        modulated = modulate(self.norm_final(x), shift, scale)
        return self.linear(modulated)
189
+
190
+
191
class VibeVoiceDiffusionHead(PreTrainedModel):
    """
    Diffusion head model for vibevoice: predicts noise/velocity for acoustic
    latents given a timestep and an LM-derived condition vector.

    Args:
        config (`VibeVoiceDiffusionHeadConfig`): Model configuration.
    """
    config_class = VibeVoiceDiffusionHeadConfig
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(
        self,
        config,
    ):
        super().__init__(config)
        self.config = config
        # Condition width matches the transformer hidden size.
        self.cond_dim = config.hidden_size
        latent_size = config.latent_size

        self.noisy_images_proj = nn.Linear(latent_size, config.hidden_size, bias=False)
        self.cond_proj = nn.Linear(config.hidden_size, self.cond_dim, bias=False)
        self.t_embedder = TimestepEmbedder(self.cond_dim)

        ffn_dim = int(config.hidden_size * config.head_ffn_ratio)

        # Stack of adaLN-modulated FFN blocks.
        self.layers = nn.ModuleList(
            HeadLayer(
                embed_dim=config.hidden_size,
                ffn_dim=ffn_dim,
                cond_dim=self.cond_dim,
                norm_eps=config.rms_norm_eps,
            )
            for _ in range(config.head_layers)
        )

        # Projects back to the latent space.
        self.final_layer = FinalLayer(
            hidden_size=config.hidden_size,
            output_size=latent_size,
            cond_size=self.cond_dim,
            norm_eps=config.rms_norm_eps,
        )

        self.initialize_weights()

    def initialize_weights(self):
        """Initialize weights: normal init for the timestep MLP; zero init for all
        adaLN modulations and the output projection (so the head starts as identity)."""
        for linear in (self.t_embedder.mlp[0], self.t_embedder.mlp[2]):
            nn.init.normal_(linear.weight, std=0.02)

        for layer in self.layers:
            nn.init.constant_(layer.adaLN_modulation[-1].weight, 0)

        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
        nn.init.constant_(self.final_layer.linear.weight, 0)

    def forward(
        self,
        noisy_images,
        timesteps,
        condition,
    ):
        """
        Forward pass of the prediction head.

        Args:
            noisy_images (`torch.Tensor`): Noisy latents to denoise.
            timesteps (`torch.Tensor`): Diffusion timesteps.
            condition (`torch.Tensor`): Conditioning information.

        Returns:
            `torch.Tensor`: The predicted noise/velocity.
        """
        hidden = self.noisy_images_proj(noisy_images)
        # Timestep embedding and projected condition are summed into one signal.
        cond = self.cond_proj(condition) + self.t_embedder(timesteps)

        for layer in self.layers:
            hidden = layer(hidden, cond)

        return self.final_layer(hidden, cond)
281
+
282
+
283
# Register so AutoModel.from_pretrained resolves the custom config class
# to the diffusion head implementation.
AutoModel.register(VibeVoiceDiffusionHeadConfig, VibeVoiceDiffusionHead)

__all__ = [
    "VibeVoiceDiffusionHead",
]
VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_text_tokenizer.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tokenization classes for vibevoice."""
2
+
3
+ from typing import List, Optional, Union
4
+
5
+ from transformers.utils import logging
6
+ from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
7
+ from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast
8
+
9
+ logger = logging.get_logger(__name__)
10
+
11
+
12
class VibeVoiceTextTokenizer(Qwen2Tokenizer):
    """
    Slow VibeVoice tokenizer.

    Extends the Qwen2 tokenizer with speech-related special tokens; the Qwen
    vision tokens are repurposed to delimit speech segments.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not used for vibevoice.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding.
        add_special_tokens (`bool`, *optional*, defaults to `True`):
            Whether or not to add special tokens when encoding.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        add_prefix_space=False,
        add_special_tokens=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        # Register the VibeVoice speech tokens and cache their ids.
        self._add_vibevoice_special_tokens()

    def _add_vibevoice_special_tokens(self):
        """Add VibeVoice-specific special tokens and cache their ids."""
        added = self.add_special_tokens(
            {
                "additional_special_tokens": [
                    "<|vision_start|>",  # speech segment start (reusing vision tokens)
                    "<|vision_end|>",    # speech segment end
                    "<|vision_pad|>",    # speech diffusion pad
                ]
            }
        )

        self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
        self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
        self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")
        self._eos_id = self.convert_tokens_to_ids('<|endoftext|>')

        return added

    @property
    def eos_id(self) -> int:
        """Id of the end of sequence token."""
        return self._eos_id

    @property
    def speech_start_id(self) -> int:
        """Id of the speech start token."""
        return self._speech_start_id

    @property
    def speech_end_id(self) -> int:
        """Id of the speech end token."""
        return self._speech_end_id

    @property
    def speech_diffusion_id(self) -> int:
        """Id of the speech diffusion token."""
        return self._speech_diffusion_id

    @property
    def pad_id(self) -> int:
        """Id used for padding (returns -100 for loss masking)."""
        return -100
110
+
111
+
112
class VibeVoiceTextTokenizerFast(Qwen2TokenizerFast):
    """
    Fast VibeVoice tokenizer (backed by HuggingFace's *tokenizers* library).

    Extends the Qwen2 fast tokenizer with speech-related special tokens; the
    Qwen vision tokens are repurposed to delimit speech segments.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to [tokenizers](https://github.com/huggingface/tokenizers) file.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not used for vibevoice.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Register the VibeVoice speech tokens and cache their ids.
        self._add_vibevoice_special_tokens()

    def _add_vibevoice_special_tokens(self):
        """Add VibeVoice-specific special tokens and cache their ids."""
        added = self.add_special_tokens(
            {
                "additional_special_tokens": [
                    "<|vision_start|>",  # speech segment start (reusing vision tokens)
                    "<|vision_end|>",    # speech segment end
                    "<|vision_pad|>",    # speech diffusion pad
                ]
            }
        )

        self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
        self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
        self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")

        self._eos_id = self.eos_token_id  # qwen2 / qwen3
        self._pad_id = self.convert_tokens_to_ids('<|image_pad|>')

        return added

    @property
    def eos_id(self) -> int:
        """Id of the end of sequence token."""
        return self._eos_id

    @property
    def speech_start_id(self) -> int:
        """Id of the speech start token."""
        return self._speech_start_id

    @property
    def speech_end_id(self) -> int:
        """Id of the speech end token."""
        return self._speech_end_id

    @property
    def speech_diffusion_id(self) -> int:
        """Id of the speech diffusion token."""
        return self._speech_diffusion_id

    @property
    def pad_id(self) -> int:
        """Id used for padding (the cached `<|image_pad|>` token id)."""
        return self._pad_id
209
+
210
+
211
+ __all__ = [
212
+ "VibeVoiceTextTokenizer",
213
+ "VibeVoiceTextTokenizerFast",
214
+ ]
VibeVoice-finetuning/src/vibevoice/modular/modular_vibevoice_tokenizer.py ADDED
@@ -0,0 +1,1195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import typing as tp
3
+ from functools import partial
4
+ from dataclasses import dataclass, field
5
+ from typing import Dict, List, Optional, Tuple, Union
6
+ import copy
7
+
8
+ import numpy as np
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from transformers.models.auto import AutoModel
14
+
15
+ from transformers.configuration_utils import PretrainedConfig
16
+ from transformers.utils import logging
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.activations import ACT2FN
19
+
20
+ from .configuration_vibevoice import VibeVoiceAcousticTokenizerConfig, VibeVoiceSemanticTokenizerConfig
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ import os
25
+ # Try to import APEX FusedRMSNorm
26
+ try:
27
+ from apex.normalization.fused_layer_norm import fused_rms_norm_affine
28
+ APEX_AVAILABLE = True
29
+ logger.info("APEX FusedRMSNorm is available and will be used for optimization")
30
+ if int(os.getenv("OPTIMIZE_FOR_SPEED", "0")) == 0:
31
+ APEX_AVAILABLE = False
32
+ logger.warning("APEX FusedRMSNorm is disabled by environment variable OPTIMIZE_FOR_SPEED=0")
33
+ except ImportError:
34
+ APEX_AVAILABLE = False
35
+ logger.warning("APEX FusedRMSNorm not available, using native implementation")
36
+ # APEX_AVAILABLE=False
37
+
38
+ # Normalization modules
39
class ConvLayerNorm(nn.LayerNorm):
    """
    LayerNorm for channel-first tensors: swaps channels to the last axis,
    normalizes in float32, then swaps back to the original layout.
    """
    def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs):
        super().__init__(normalized_shape, **kwargs)

    def forward(self, x):
        # (B, C, T) -> (B, T, C): layer_norm expects channels last.
        y = x.transpose(1, 2)
        # Normalize in float32 for stability, cast back to the input dtype.
        y = nn.functional.layer_norm(
            y.float(), self.normalized_shape, self.weight.float(), self.bias.float(), self.eps
        ).type_as(y)
        # (B, T, C) -> (B, C, T)
        return y.transpose(1, 2)
52
+
53
class RMSNorm(nn.Module):
    """Root-mean-square normalization with an optional learned scale."""

    def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if elementwise_affine:
            shape = weight_shape if weight_shape is not None else (dim,)
            self.weight = nn.Parameter(torch.ones(shape))
        else:
            # Keep the attribute so forward() can test `self.weight is None`.
            self.register_parameter('weight', None)

    def _norm(self, x):
        # Divide by the RMS over the last dimension (eps for stability).
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        # Normalize in float32, then cast back to the input dtype.
        out = self._norm(x.float()).type_as(x)
        return out if self.weight is None else out * self.weight

    def extra_repr(self) -> str:
        return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'
76
+
77
class ConvRMSNorm(RMSNorm):
    """Channel-first RMSNorm: transposes to (B, T, C), normalizes, transposes back."""

    def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
        super().__init__(dim, eps, elementwise_affine, weight_shape)

    def forward(self, x):
        y = x.transpose(1, 2)  # (B, C, T) -> (B, T, C)
        if APEX_AVAILABLE and self.elementwise_affine:
            # Fused APEX kernel; only valid when a learned weight exists.
            y = fused_rms_norm_affine(y, self.weight, self.weight.shape, self.eps)
        else:
            # Native fallback: normalize in float32, then apply the scale.
            out = self._norm(y.float()).type_as(y)
            y = out * self.weight if self.weight is not None else out
        return y.transpose(1, 2)  # (B, T, C) -> (B, C, T)
92
+
93
# Convolutional layers and utilities
# Normalization schemes understood by the conv wrappers below.
CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
                                 'time_layer_norm', 'layer_norm', 'time_group_norm'])


def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
    """Wrap *module* in a weight reparametrization when *norm* requires one."""
    assert norm in CONV_NORMALIZATIONS
    if norm == 'weight_norm':
        return nn.utils.weight_norm(module)
    if norm == 'spectral_norm':
        return nn.utils.spectral_norm(module)
    # Membership was already checked above; every remaining scheme is applied
    # as a separate module rather than a reparametrization.
    return module
108
+
109
+
110
def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
    """Return the normalization module matching *norm*.

    If *causal* is True, raises for normalizations that cannot be evaluated
    causally (currently group norm).
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == 'layer_norm':
        assert isinstance(module, nn.modules.conv._ConvNd)
        return ConvLayerNorm(module.out_channels, **norm_kwargs)
    if norm == 'time_group_norm':
        if causal:
            raise ValueError("GroupNorm doesn't support causal evaluation.")
        assert isinstance(module, nn.modules.conv._ConvNd)
        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
    # 'none' and the reparametrization-based schemes need no extra module.
    return nn.Identity()
125
+
126
+
127
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
                                 padding_total: int = 0) -> int:
    """Extra right-padding needed so the final stride window covers the whole input."""
    length = x.shape[-1]
    frames = (length - kernel_size + padding_total) / stride + 1
    ideal = (math.ceil(frames) - 1) * stride + (kernel_size - padding_total)
    return ideal - length
134
+
135
+
136
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
    """Pad the last dimension of *x*.

    A thin wrapper over ``F.pad`` that
    (a) accepts the conv-style aliases ``'zero'``/``'zeros'`` for constant
        zero padding (``F.pad`` itself rejects them), and
    (b) makes reflect padding work for inputs shorter than the pad amount by
        temporarily right-padding with zeros and trimming afterwards.

    Args:
        x: Tensor of shape (..., T).
        paddings: (left, right) pad sizes, both >= 0.
        mode: 'zero'/'zeros'/'constant', 'reflect', or any ``F.pad`` mode.
        value: Fill value for constant padding.
    """
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    if mode in ('zero', 'zeros'):
        # F.pad has no 'zero'/'zeros' mode; both mean constant zero padding.
        mode = 'constant'
    if mode == 'reflect':
        length = x.shape[-1]
        max_pad = max(padding_left, padding_right)
        extra_pad = 0
        if length <= max_pad:
            # Reflect padding requires length > pad; grow the input first.
            extra_pad = max_pad - length + 1
            x = F.pad(x, (0, extra_pad))
        padded = F.pad(x, paddings, mode, value)
        # Trim the temporary growth so the output length matches exactly.
        end = padded.shape[-1] - extra_pad
        return padded[..., :end]
    return F.pad(x, paddings, mode, value)
152
+
153
+
154
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
    """Strip (left, right) padding from the last dimension. Only for 1d!"""
    left, right = paddings
    assert left >= 0 and right >= 0, (left, right)
    assert left + right <= x.shape[-1]
    return x[..., left:x.shape[-1] - right]
161
+
162
+
163
class NormConv1d(nn.Module):
    """Conv1d followed by the normalization module selected by *norm*."""

    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        # Optional weight reparametrization (weight/spectral norm) on the conv,
        # plus an optional activation-space normalization module after it.
        self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
        self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        return self.norm(self.conv(x))
176
+
177
+
178
class NormConvTranspose1d(nn.Module):
    """ConvTranspose1d followed by the normalization module selected by *norm*."""

    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        # Optional weight reparametrization on the transposed conv, plus an
        # optional activation-space normalization module after it.
        self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
        self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        return self.norm(self.convtr(x))
191
+
192
+
193
class VibeVoiceTokenizerStreamingCache:
    """Per-(layer, sample) state store for streaming convolutions, akin to a KV cache."""

    def __init__(self):
        # Maps (layer_id, sample_idx) -> cached state tensor.
        self.cache = {}

    def get(self, layer_id: str, sample_indices: torch.Tensor) -> Optional[torch.Tensor]:
        """Return stacked cached states for *layer_id*, or None if any sample is missing.

        States with differing temporal lengths are zero-padded on the LEFT so
        the most recent samples stay aligned.
        """
        states = []
        longest = 0
        for idx in sample_indices.tolist():
            state = self.cache.get((layer_id, idx))
            if state is None:
                return None  # all-or-nothing: any miss invalidates the batch
            states.append(state)
            longest = max(longest, state.shape[-1])

        if states and states[0].dim() >= 2:
            aligned = []
            for state in states:
                deficit = longest - state.shape[-1]
                if deficit > 0:
                    state = F.pad(state, (deficit, 0), mode='constant', value=0)
                aligned.append(state)
            return torch.stack(aligned, dim=0)
        return torch.stack(states, dim=0)

    def set(self, layer_id: str, sample_indices: torch.Tensor, states: torch.Tensor):
        """Store per-sample states (detached) for the given layer."""
        for pos, idx in enumerate(sample_indices.tolist()):
            self.cache[(layer_id, idx)] = states[pos].detach()

    def set_to_zero(self, sample_indices: torch.Tensor):
        """Zero every cached state belonging to the given samples, across all layers."""
        targets = set(sample_indices.tolist())
        for key, tensor in list(self.cache.items()):
            if key[1] in targets:
                self.cache[key] = torch.zeros_like(tensor)

    def clear(self, layer_id: Optional[str] = None, sample_indices: Optional[torch.Tensor] = None):
        """Drop entries: everything, a whole layer, or specific samples of a layer."""
        if layer_id is None and sample_indices is None:
            self.cache.clear()
        elif layer_id is not None and sample_indices is None:
            for key in [k for k in self.cache if k[0] == layer_id]:
                del self.cache[key]
        elif layer_id is not None and sample_indices is not None:
            for idx in sample_indices.tolist():
                self.cache.pop((layer_id, idx), None)
257
+
258
class SConv1d(nn.Module):
    """Conv1d with built-in handling of asymmetric or causal padding and normalization."""
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, dilation: int = 1,
                 groups: int = 1, bias: bool = True, causal: bool = False,
                 norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
                 pad_mode: str = 'reflect'):
        super().__init__()
        # Underlying conv plus its (optional) normalization wrapper.
        self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
                               dilation=dilation, groups=groups, bias=bias, causal=causal,
                               norm=norm, norm_kwargs=norm_kwargs)
        self.causal = causal
        self.pad_mode = pad_mode

        # Store configuration
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels

        # For causal convolution, we need to maintain kernel_size - 1 samples as context
        # need to check use which context_size is more suitable
        # self.context_size = (kernel_size - 1) * dilation
        self.context_size = (kernel_size - 1) * dilation - (stride - 1)

        # For non-streaming mode, calculate padding
        self.padding_total = (kernel_size - 1) * dilation - (stride - 1)

        # Create a unique layer ID for cache management
        self._layer_id = None

    @property
    def layer_id(self):
        # Lazily derived, process-unique key (based on object identity) used
        # to index into VibeVoiceTokenizerStreamingCache.
        if self._layer_id is None:
            self._layer_id = f"sconv1d_{id(self)}"
        return self._layer_id

    def forward(self, x: torch.Tensor,
                cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
                sample_indices: Optional[torch.Tensor] = None,
                use_cache: bool = False,
                debug: bool = False) -> torch.Tensor:
        """
        Forward pass with optional streaming support via cache.

        Args:
            x: Input tensor [batch_size, channels, time]
            cache: VibeVoiceTokenizerStreamingCache object for maintaining states
            sample_indices: Indices identifying each sample for cache management
            use_cache: Whether to use cached states for streaming
            debug: Whether to print debug information

        Returns:
            Output tensor
        """
        B, C, T = x.shape

        # Non-streaming mode
        if not use_cache or cache is None:
            return self._forward_non_streaming(x, debug=debug)

        # Streaming mode
        assert self.causal, "Streaming mode is only supported for causal convolutions"
        assert sample_indices is not None, "sample_indices must be provided for streaming mode"
        assert len(sample_indices) == B, "sample_indices must match batch size"

        return self._forward_streaming(x, cache, sample_indices, debug)

    def _forward_streaming(self, x: torch.Tensor,
                           cache: VibeVoiceTokenizerStreamingCache,
                           sample_indices: torch.Tensor,
                           debug: bool = False) -> torch.Tensor:
        """Streaming forward pass with cache operations kept separate from compiled code"""
        B, C, T = x.shape

        # Cache operations (not compiled)
        cached_states = cache.get(self.layer_id, sample_indices)

        if cached_states is None:
            # First chunk - initialize with zeros for context
            if self.context_size > 0:
                cached_states = torch.zeros(B, C, self.context_size, device=x.device, dtype=x.dtype)
                if debug:
                    print(f"[DEBUG] Initialized cache with shape: {cached_states.shape}, context_size={self.context_size}")
            else:
                # context_size can be 0 (e.g. kernel_size == stride): nothing to carry over.
                cached_states = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
                if debug:
                    print(f"[DEBUG] No context needed (kernel_size=stride)")

        # Concatenate cached states with input
        if cached_states.shape[2] > 0:
            input_with_context = torch.cat([cached_states, x], dim=2)
        else:
            input_with_context = x

        if debug:
            print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_states.shape}, Combined: {input_with_context.shape}")

        # Apply convolution directly - no extra padding in streaming mode
        # The conv layer will handle its own padding internally
        output = self.conv(input_with_context)

        if debug:
            print(f"[DEBUG] Output shape: {output.shape}")

        # Update cache for next chunk
        if self.context_size > 0:
            # Calculate how many samples to keep
            total_input_length = input_with_context.shape[2]

            # Keep the last context_size samples
            if total_input_length >= self.context_size:
                new_cache_start = total_input_length - self.context_size
                new_cache = input_with_context[:, :, new_cache_start:]
            else:
                # If we have less than context_size samples, keep everything
                new_cache = input_with_context

            if debug:
                print(f"[DEBUG] New cache shape: {new_cache.shape}")

            cache.set(self.layer_id, sample_indices, new_cache)

        return output

    def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
        """Standard forward pass without streaming"""
        B, C, T = x.shape
        kernel_size = self.kernel_size
        stride = self.stride
        dilation = self.dilation
        padding_total = self.padding_total

        # Compute extra padding for stride alignment
        extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)

        if debug:
            print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}, padding_total={padding_total}, extra_padding={extra_padding}")

        if self.causal:
            # Left padding for causal
            if self.pad_mode == 'constant':
                x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode, value=0)
            else:
                x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
        else:
            # Symmetric padding for non-causal
            padding_right = padding_total // 2
            padding_left = padding_total - padding_right
            x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)

        if debug:
            print(f"[DEBUG NON-STREAMING] After padding: {x.shape}")

        output = self.conv(x)

        if debug:
            print(f"[DEBUG NON-STREAMING] Output shape: {output.shape}")

        return output
419
+
420
+
421
class SConvTranspose1d(nn.Module):
    """ConvTranspose1d with built-in handling of asymmetric or causal padding and normalization."""
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, causal: bool = False,
                 norm: str = 'none', trim_right_ratio: float = 1.,
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, bias: bool = True):
        super().__init__()
        # Underlying transposed conv plus its (optional) normalization wrapper.
        self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
                                          causal=causal, norm=norm, norm_kwargs=norm_kwargs, bias=bias)
        self.causal = causal
        self.trim_right_ratio = trim_right_ratio
        assert self.causal or self.trim_right_ratio == 1., \
            "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
        assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.

        # Store configuration
        self.kernel_size = kernel_size
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels

        # For transposed convolution, padding calculation is different
        self.padding_total = kernel_size - stride

        # For streaming, we need to keep track of input history
        # Transposed conv needs to see multiple input samples to produce correct output
        self.context_size = kernel_size - 1

        # Create a unique layer ID for cache management
        self._layer_id = None

    @property
    def layer_id(self):
        # Lazily derived, process-unique key (based on object identity) used
        # to index into VibeVoiceTokenizerStreamingCache.
        if self._layer_id is None:
            self._layer_id = f"sconvtr1d_{id(self)}"
        return self._layer_id

    def forward(self, x: torch.Tensor,
                cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
                sample_indices: Optional[torch.Tensor] = None,
                use_cache: bool = False,
                debug: bool = False) -> torch.Tensor:
        """
        Forward pass with optional streaming support via cache.
        """
        B, C, T = x.shape

        # Non-streaming mode
        if not use_cache or cache is None:
            return self._forward_non_streaming(x, debug=debug)

        # Streaming mode
        assert sample_indices is not None, "sample_indices must be provided for streaming mode"
        assert len(sample_indices) == B, "sample_indices must match batch size"

        return self._forward_streaming(x, cache, sample_indices, debug)

    def _forward_streaming(self, x: torch.Tensor,
                           cache: VibeVoiceTokenizerStreamingCache,
                           sample_indices: torch.Tensor,
                           debug: bool = False) -> torch.Tensor:
        """Streaming forward pass with cache operations kept separate from compiled code"""
        B, C, T = x.shape

        # Cache operations (not compiled)
        cached_input = cache.get(self.layer_id, sample_indices)

        if cached_input is None:
            # First chunk - no history yet
            cached_input = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
            if debug:
                print(f"[DEBUG] Initialized empty cache for transposed conv")

        # Concatenate cached input with new input
        full_input = torch.cat([cached_input, x], dim=2)

        if debug:
            print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_input.shape}, Combined: {full_input.shape}")

        # First chunk or debug mode - use uncompiled version
        # NOTE: the transposed conv is re-run over the full (context + new)
        # input; only the freshly produced tail is returned below.
        full_output = self.convtr(full_input)

        if debug:
            print(f"[DEBUG] Full transposed conv output shape: {full_output.shape}")

        # Calculate padding to remove
        if self.causal:
            # Causal mode: split the padding trim between the two ends
            # according to trim_right_ratio.
            padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
            padding_left = self.padding_total - padding_right
        else:
            padding_right = self.padding_total // 2
            padding_left = self.padding_total - padding_right

        # Remove padding
        if padding_left + padding_right > 0:
            full_output = unpad1d(full_output, (padding_left, padding_right))

        if debug:
            print(f"[DEBUG] After unpadding: {full_output.shape}")

        # Determine which part of the output corresponds to the new input
        if cached_input.shape[2] == 0:
            # First chunk - return all output
            output = full_output
        else:
            # Subsequent chunks - return only the new output
            # Each new input frame contributes `stride` output samples.
            expected_new_output = T * self.stride

            # Take the last expected_new_output samples
            if full_output.shape[2] >= expected_new_output:
                output = full_output[:, :, -expected_new_output:]
            else:
                output = full_output

        if debug:
            print(f"[DEBUG] Final streaming output shape: {output.shape}")

        # Update cache
        if full_input.shape[2] > self.context_size:
            new_cache = full_input[:, :, -self.context_size:]
        else:
            new_cache = full_input

        if debug:
            print(f"[DEBUG] New cache shape: {new_cache.shape}")

        cache.set(self.layer_id, sample_indices, new_cache)

        return output

    def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
        """Standard forward pass without streaming"""
        if debug:
            print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}")

        # Apply transposed convolution
        y = self.convtr(x)

        if debug:
            print(f"[DEBUG NON-STREAMING] After transposed conv: {y.shape}")

        # Calculate and remove padding
        if self.causal:
            padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
            padding_left = self.padding_total - padding_right
        else:
            padding_right = self.padding_total // 2
            padding_left = self.padding_total - padding_right

        if padding_left + padding_right > 0:
            y = unpad1d(y, (padding_left, padding_right))

        if debug:
            print(f"[DEBUG NON-STREAMING] Final output shape: {y.shape}")

        return y
577
+
578
+ # FFN
579
class FFN(nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Linear."""

    def __init__(
        self,
        embed_dim,
        ffn_dim,
        bias=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Expand to ffn_dim, apply GELU, then project back to embed_dim.
        self.linear1 = nn.Linear(self.embed_dim, ffn_dim, bias=bias)
        self.gelu = ACT2FN["gelu"]
        self.linear2 = nn.Linear(ffn_dim, self.embed_dim, bias=bias)

    def forward(self, x):
        return self.linear2(self.gelu(self.linear1(x)))
597
+
598
+
599
class Convlayer(nn.Module):
    """Thin wrapper around SConv1d so a plain conv can serve as a mixer layer."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        dilation=1,
        groups=1,
        bias=True,
        pad_mode='zeros',
        norm='weight_norm',
        causal=True,
    ):
        super().__init__()
        # All configuration is forwarded verbatim to SConv1d.
        self.conv = SConv1d(
            in_channels, out_channels, kernel_size,
            stride=stride, dilation=dilation, groups=groups,
            bias=bias, pad_mode=pad_mode, norm=norm, causal=causal,
        )

    def forward(self, x):
        return self.conv(x)
619
+
620
class Block1D(nn.Module):
    """ConvNeXt-style 1D residual block.

    Two residual branches: (norm -> mixer conv) and (norm -> FFN), each with
    optional layer scale (gamma) and drop-path.
    """

    def __init__(self, dim, kernel_size=7, drop_path=0., mixer_layer='conv',
                 layer_scale_init_value=1e-6, **kwargs):
        super().__init__()

        eps = kwargs.get('eps', 1e-6)
        ln_kind = kwargs.get('layernorm', 'LN')
        # Note: an unrecognized `layernorm` value leaves norm/ffn_norm unset
        # (matching the original branching, which silently falls through).
        if ln_kind == 'LN':
            self.norm = ConvLayerNorm(dim, eps=eps)
            self.ffn_norm = ConvLayerNorm(dim, eps=eps)
        elif ln_kind == 'RMSNorm':
            self.norm = ConvRMSNorm(dim, eps=eps)
            self.ffn_norm = ConvRMSNorm(dim, eps=eps)

        # Pick the mixer's group count: 1 (or configured) for a full conv,
        # `dim` for a depthwise conv.
        if mixer_layer == 'conv':
            mixer_groups = kwargs.get('groups', 1)
        elif mixer_layer == 'depthwise_conv':
            mixer_groups = dim
        else:
            raise ValueError(f"Unsupported mixer layer: {mixer_layer}")
        self.mixer = Convlayer(
            dim, dim,
            groups=mixer_groups,
            kernel_size=kernel_size,
            pad_mode=kwargs.get('pad_mode', 'reflect'),
            norm=kwargs.get('norm', 'none'),
            causal=kwargs.get('causal', True),
            bias=kwargs.get('bias', True),
        )

        self.ffn = FFN(
            dim,
            kwargs.get('ffn_expansion', 4) * dim,
            bias=kwargs.get('bias', False),
        )
        # NOTE(review): torch.nn exposes no DropPath; this branch would raise
        # AttributeError whenever drop_path > 0 — confirm the intended source
        # (e.g. timm) before enabling stochastic depth.
        self.drop_path = nn.Identity() if drop_path <= 0. else nn.modules.DropPath(drop_path)

        # Layer-scale parameters (per-channel), or None when disabled.
        if layer_scale_init_value > 0:
            self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            self.ffn_gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma = None
            self.ffn_gamma = None

    def forward(self, x):
        # Mixer branch with residual connection.
        shortcut = x
        h = self.mixer(self.norm(x))
        if self.gamma is not None:
            h = h * self.gamma.unsqueeze(-1)
        x = shortcut + self.drop_path(h)

        # FFN branch: the FFN operates channels-last, so transpose around it.
        shortcut = x
        h = self.ffn_norm(x)
        h = h.permute(0, 2, 1)
        h = self.ffn(h)
        h = h.permute(0, 2, 1)
        if self.ffn_gamma is not None:
            h = h * self.ffn_gamma.unsqueeze(-1)
        return shortcut + self.drop_path(h)
685
+
686
+
687
class TokenizerEncoder(nn.Module):
    """
    Encoder component for the VibeVoice tokenizer that converts audio to latent representations.

    Args:
        config: Configuration object with model parameters
    """
    def __init__(self, config):
        super().__init__()

        # Extract parameters from config
        self.channels = config.channels
        self.dimension = config.dimension
        self.n_filters = config.n_filters
        # The encoder consumes the ratios in reverse order relative to the decoder.
        self.ratios = list(reversed(config.ratios))
        self.depths = config.depths
        self.n_residual_layers = getattr(config, "n_residual_layers", 1)
        # Total downsampling factor from waveform samples to latent frames.
        self.hop_length = np.prod(self.ratios)
        self.causal = config.causal

        # Additional config parameters with defaults
        kernel_size = getattr(config, "kernel_size", 7)
        last_kernel_size = getattr(config, "last_kernel_size", 7)
        norm = getattr(config, "norm", "none")
        norm_params = getattr(config, "norm_params", {})
        pad_mode = getattr(config, "pad_mode", "reflect")
        bias = getattr(config, "bias", True)
        layernorm = getattr(config, "layernorm", "LN")
        layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
        layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
        drop_path_rate = getattr(config, "drop_path_rate", 0.0)
        mixer_layer = getattr(config, "mixer_layer", "conv")
        layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
        disable_last_norm = getattr(config, "disable_last_norm", False)

        # determine the norm type based on layernorm
        if layernorm == 'LN':
            norm_type = ConvLayerNorm
        elif layernorm == 'RMSNorm':
            norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
        else:
            raise ValueError(f"Unsupported norm type: {layernorm}")

        # stem and intermediate downsampling conv layers
        stem = nn.Sequential(
            SConv1d(self.channels, self.n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
        )

        # downsample_layers[0] is the stem; layer i+1 strides time down by
        # ratios[i] while doubling the channel count.
        self.downsample_layers = nn.ModuleList()
        self.downsample_layers.append(stem)
        for i in range(len(self.ratios)):
            in_ch = self.n_filters * (2 ** i)
            out_ch = self.n_filters * (2 ** (i + 1))
            downsample_layer = nn.Sequential(
                SConv1d(in_ch, out_ch, kernel_size=self.ratios[i] * 2, stride=self.ratios[i], causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)
            )
            self.downsample_layers.append(downsample_layer)

        # configure the transformer blocks
        # All Block1D hyper-parameters except dim/drop_path are bound here.
        layer_type = partial(
            Block1D,
            mixer_layer=mixer_layer,
            layernorm=layernorm,
            eps=layernorm_eps,
            causal=self.causal,
            pad_mode=pad_mode,
            norm=norm,
            bias=bias,
            layer_scale_init_value=layer_scale_init_value,
        )

        self.stages = nn.ModuleList()
        # Stochastic-depth rates increase linearly across all blocks.
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0

        for i in range(len(self.depths)):
            in_ch = self.n_filters * (2 ** i)
            stage = nn.Sequential(
                *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
            )
            self.stages.append(stage)
            cur += self.depths[i]

        # `in_ch` is left at the channel count of the last stage.
        if not disable_last_norm:
            self.norm = norm_type(in_ch, eps=layernorm_eps)
        else:
            self.norm = nn.Identity()
        # Final projection from conv features to the latent dimension.
        self.head = SConv1d(in_ch, self.dimension, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)

    def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        # NOTE(review): iteration assumes len(self.depths) == len(self.ratios) + 1
        # so every stage has a matching downsample layer — confirm against configs.
        for i in range(len(self.depths)):
            # Apply downsampling
            for layer in self.downsample_layers[i]:
                if isinstance(layer, SConv1d):
                    # SConv1d layers accept the streaming-cache arguments directly.
                    x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                else:
                    x = layer(x)

            # Apply stage (Block1D contains Convlayer which contains SConv1d)
            for block in self.stages[i]:
                if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
                    # Block1D forward with cache support: re-implements
                    # Block1D.forward inline so the cache can be threaded through
                    # the inner SConv1d (note: drop_path is not applied here).
                    residual = x
                    x = block.norm(x)
                    x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                    if block.gamma is not None:
                        x = x * block.gamma.unsqueeze(-1)
                    x = residual + x

                    # FFN part
                    residual = x
                    x = block.ffn_norm(x)
                    x = x.permute(0, 2, 1)
                    x = block.ffn(x)
                    x = x.permute(0, 2, 1)
                    if block.ffn_gamma is not None:
                        x = x * block.ffn_gamma.unsqueeze(-1)
                    x = residual + x
                else:
                    x = block(x)

        return self.norm(x)

    def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        # Run the conv/stage trunk, then project to the latent dimension.
        x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return x
814
+
815
+
816
class TokenizerDecoder(nn.Module):
    """
    Decoder component for the VibeVoice tokenizer that converts latent representations back to audio.

    Args:
        config: Configuration object with model parameters
    """
    def __init__(self, config):
        super().__init__()

        # Extract parameters from config
        self.dimension = config.dimension
        self.channels = config.channels
        self.n_filters = config.n_filters
        self.ratios = config.ratios

        # IMPORTANT CHANGE: Don't reverse depths again since they're already reversed in VibeVoiceAcousticTokenizerModel
        self.depths = config.depths # Changed from list(reversed(config.depths))

        self.n_residual_layers = getattr(config, "n_residual_layers", 1)
        # Total upsampling factor from latent frames back to waveform samples.
        self.hop_length = np.prod(self.ratios)
        self.causal = config.causal

        # Additional config parameters with defaults
        kernel_size = getattr(config, "kernel_size", 7)
        last_kernel_size = getattr(config, "last_kernel_size", 7)
        norm = getattr(config, "norm", "none")
        norm_params = getattr(config, "norm_params", {})
        pad_mode = getattr(config, "pad_mode", "reflect")
        bias = getattr(config, "bias", True)
        layernorm = getattr(config, "layernorm", "LN")
        layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
        trim_right_ratio = getattr(config, "trim_right_ratio", 1.0)
        layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
        drop_path_rate = getattr(config, "drop_path_rate", 0.0)
        mixer_layer = getattr(config, "mixer_layer", "conv")
        layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
        disable_last_norm = getattr(config, "disable_last_norm", False)

        # determine the norm type based on layernorm
        if layernorm == 'LN':
            norm_type = ConvLayerNorm
        elif layernorm == 'RMSNorm':
            norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
        else:
            raise ValueError(f"Unsupported norm type: {layernorm}")

        # stem and upsampling layers
        stem = nn.Sequential(
            SConv1d(self.dimension, self.n_filters * 2 ** (len(self.depths) - 1), kernel_size, norm=norm,
                     norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
        )

        # upsample_layers[0] is the stem; layer i+1 upsamples time by ratios[i]
        # while halving the channel count.
        self.upsample_layers = nn.ModuleList()
        self.upsample_layers.append(stem)
        for i in range(len(self.ratios)):
            in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
            out_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i - 1))
            upsample_layer = nn.Sequential(
                SConvTranspose1d(in_ch, out_ch,
                                 kernel_size=self.ratios[i] * 2, stride=self.ratios[i],
                                 norm=norm, norm_kwargs=norm_params, bias=bias,
                                 causal=self.causal, trim_right_ratio=trim_right_ratio),
            )
            self.upsample_layers.append(upsample_layer)

        # configure transformer blocks
        # All Block1D hyper-parameters except dim/drop_path are bound here.
        layer_type = partial(
            Block1D,
            mixer_layer=mixer_layer,
            layernorm=layernorm,
            eps=layernorm_eps,
            causal=self.causal,
            pad_mode=pad_mode,
            norm=norm,
            bias=bias,
            layer_scale_init_value=layer_scale_init_value,
        )

        self.stages = nn.ModuleList()
        # Stochastic-depth rates increase linearly across all blocks.
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0

        # Create stages in the same order as the original model
        for i in range(len(self.depths)):
            in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
            stage = nn.Sequential(
                *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
            )
            self.stages.append(stage)
            cur += self.depths[i]

        # `in_ch` is left at the channel count of the last stage.
        if not disable_last_norm:
            self.norm = norm_type(in_ch, eps=layernorm_eps)
        else:
            self.norm = nn.Identity()
        # Final projection from conv features back to waveform channels.
        self.head = SConv1d(in_ch, self.channels, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)

    def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        # NOTE(review): iteration assumes len(self.depths) == len(self.ratios) + 1
        # so every stage has a matching upsample layer — confirm against configs.
        for i in range(len(self.depths)):
            # Apply upsampling
            for layer in self.upsample_layers[i]:
                if isinstance(layer, (SConv1d, SConvTranspose1d)):
                    # Streaming-capable layers accept the cache arguments directly.
                    x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                else:
                    x = layer(x)

            # Apply stage (Block1D contains Convlayer which contains SConv1d)
            for block in self.stages[i]:
                if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
                    # Block1D forward with cache support: re-implements
                    # Block1D.forward inline so the cache can be threaded through
                    # the inner SConv1d (note: drop_path is not applied here).
                    residual = x
                    x = block.norm(x)
                    x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                    if block.gamma is not None:
                        x = x * block.gamma.unsqueeze(-1)
                    x = residual + x

                    # FFN part
                    residual = x
                    x = block.ffn_norm(x)
                    x = x.permute(0, 2, 1)
                    x = block.ffn(x)
                    x = x.permute(0, 2, 1)
                    if block.ffn_gamma is not None:
                        x = x * block.ffn_gamma.unsqueeze(-1)
                    x = residual + x
                else:
                    x = block(x)

        return self.norm(x)

    def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        # Run the upsampling trunk, then project back to audio channels.
        x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return x
952
+
953
+
954
@dataclass
class VibeVoiceTokenizerEncoderOutput:
    """
    Output of VibeVoice tokenizer encoder, representing a Gaussian distribution with fixed variance.

    Args:
        mean (`torch.FloatTensor`): The mean parameters of the distribution.
        std (`float` or `torch.FloatTensor`): Fixed standard deviation value.
    """
    mean: torch.Tensor
    std: Optional[Union[float, torch.Tensor]] = None

    def sample(self, dist_type='fix'):
        """
        Sample from the distribution.

        Args:
            dist_type (`str`): Sampling method, either 'fix' or 'gaussian'.

        Returns:
            `torch.FloatTensor`: Sampled values.
            `torch.FloatTensor` (optional): Standard deviation used (only when dist_type='gaussian').
        """
        if dist_type == 'fix':
            # Reparameterized draw using the fixed std.
            return self.mean + self.std * torch.randn_like(self.mean), self.std
        if dist_type == 'gaussian':
            # One random std per batch element, scaled by std / 0.8.
            scale = self.std / 0.8
            per_sample = torch.randn(self.mean.size(0), device=self.mean.device, dtype=self.mean.dtype) * scale
            # Broadcast the per-sample std over all trailing dimensions.
            while per_sample.dim() < self.mean.dim():
                per_sample = per_sample.unsqueeze(-1)
            return self.mean + per_sample * torch.randn_like(self.mean), per_sample
        # Any other dist_type: deterministic pass-through of mean and std.
        return self.mean, self.std

    def kl(self):
        """Compute KL divergence between this distribution and a standard normal."""
        return F.mse_loss(self.mean, torch.zeros_like(self.mean), reduction='none')

    def mode(self):
        """Return the distribution mode (which is the mean for Gaussian)."""
        return self.mean
1001
+
1002
class VibeVoiceAcousticTokenizerModel(PreTrainedModel):
    """VibeVoice speech tokenizer model combining encoder and decoder for acoustic tokens"""

    config_class = VibeVoiceAcousticTokenizerConfig
    base_model_prefix = "vibevoice_acoustic_tokenizer"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _no_split_modules = ["TokenizerEncoder", "TokenizerDecoder"]

    def __init__(self, config):
        super().__init__(config)

        # Fixed std used by 'fix'-mode sampling; non-persistent so it is not
        # saved in the state dict.
        self.register_buffer('fix_std', torch.tensor(config.fix_std), persistent=False)
        self.std_dist_type = getattr(config, "std_dist_type", "fix")

        # Parse encoder depths
        # Depths may be given either as a '-'-separated string or as a list.
        if isinstance(config.encoder_depths, str):
            encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
        else:
            encoder_depths = config.encoder_depths

        # Parse decoder depths if provided
        if config.decoder_depths is not None and isinstance(config.decoder_depths, str):
            decoder_depths = [int(d) for d in config.decoder_depths.split('-')]
        else:
            # Default: use reversed encoder depths if decoder_depths is None
            decoder_depths = list(reversed(encoder_depths))

        # Create encoder config
        # A deep copy of the shared config, overridden with encoder-specific fields.
        encoder_config = copy.deepcopy(config)
        encoder_config.dimension = config.vae_dim
        encoder_config.n_filters = config.encoder_n_filters
        encoder_config.ratios = config.encoder_ratios
        encoder_config.depths = encoder_depths
        encoder_config.norm = config.conv_norm
        encoder_config.pad_mode = config.pad_mode
        encoder_config.bias = config.conv_bias
        encoder_config.layernorm_eps = config.layernorm_eps
        encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        encoder_config.mixer_layer = config.mixer_layer
        encoder_config.layer_scale_init_value = config.layer_scale_init_value
        encoder_config.disable_last_norm = config.disable_last_norm

        # Create decoder config
        # Same shared config, overridden with decoder-specific fields.
        decoder_config = copy.deepcopy(config)
        decoder_config.dimension = config.vae_dim
        decoder_config.n_filters = config.decoder_n_filters
        decoder_config.ratios = config.decoder_ratios
        decoder_config.depths = decoder_depths
        decoder_config.norm = config.conv_norm
        decoder_config.pad_mode = config.pad_mode
        decoder_config.bias = config.conv_bias
        decoder_config.layernorm_eps = config.layernorm_eps
        decoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        decoder_config.mixer_layer = config.mixer_layer
        decoder_config.layer_scale_init_value = config.layer_scale_init_value
        decoder_config.disable_last_norm = config.disable_last_norm

        # Initialize encoder and decoder
        self.encoder = TokenizerEncoder(encoder_config)
        self.decoder = TokenizerDecoder(decoder_config)

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights for the model"""
        # Linear/Conv1d: normal init with configured std; LayerNorm: ones/zeros.
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Conv1d):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    @torch.no_grad()
    def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert audio to latent representations (no gradients are tracked)."""
        latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        # Encoder output is channel-first; expose it channel-last (B, T, vae_dim).
        return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1), std=self.fix_std)

    @torch.no_grad()
    def sampling(self, encoder_output, dist_type=None):
        """Sample from the encoder output distribution"""
        # Fall back to the configured std_dist_type when no override is given.
        dist_type = dist_type or self.std_dist_type

        if dist_type == 'fix':
            return encoder_output.sample(dist_type='fix')
        elif dist_type == 'gaussian':
            return encoder_output.sample(dist_type='gaussian')
        else:
            raise ValueError(f"Unsupported dist_type: {dist_type}, expected 'fix' or 'gaussian'")

    @torch.no_grad()
    def decode(self, latents, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert latent representations back to audio (no gradients are tracked)."""
        # Accept channel-first (B, vae_dim, T) or channel-last (B, T, vae_dim)
        # latents; the decoder expects channel-first.
        # NOTE(review): ambiguous when T == vae_dim — confirm caller conventions.
        if latents.shape[1] == self.config.vae_dim:
            pass
        else:
            latents = latents.permute(0, 2, 1)

        audio = self.decoder(latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return audio

    def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Full forward pass: encode audio to latents, then decode back to audio"""
        encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        sampled_latents, _ = self.sampling(encoder_output)
        reconstructed = self.decode(sampled_latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return reconstructed, sampled_latents
1116
+
1117
+
1118
class VibeVoiceSemanticTokenizerModel(PreTrainedModel):
    """VibeVoice speech tokenizer model with only encoder for semantic tokens"""

    config_class = VibeVoiceSemanticTokenizerConfig
    base_model_prefix = "vibevoice_semantic_tokenizer"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _no_split_modules = ["TokenizerEncoder"]

    def __init__(self, config):
        super().__init__(config)

        # Parse encoder depths
        # Depths may be given either as a '-'-separated string or as a list.
        if isinstance(config.encoder_depths, str):
            encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
        else:
            encoder_depths = config.encoder_depths

        # Create encoder config
        # A deep copy of the shared config, overridden with encoder-specific fields.
        encoder_config = copy.deepcopy(config)
        encoder_config.dimension = config.vae_dim
        encoder_config.n_filters = config.encoder_n_filters
        encoder_config.ratios = config.encoder_ratios
        encoder_config.depths = encoder_depths
        encoder_config.norm = config.conv_norm
        encoder_config.pad_mode = config.pad_mode
        encoder_config.bias = config.conv_bias
        encoder_config.layernorm_eps = config.layernorm_eps
        encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        encoder_config.mixer_layer = config.mixer_layer
        encoder_config.layer_scale_init_value = config.layer_scale_init_value
        encoder_config.disable_last_norm = config.disable_last_norm

        # Initialize encoder and decoder
        self.encoder = TokenizerEncoder(encoder_config)

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights for the model"""
        # Linear/Conv1d: normal init with configured std; LayerNorm: ones/zeros.
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Conv1d):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    @torch.no_grad()
    def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert audio to latent representations (no gradients are tracked)."""
        latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        # No std: the semantic tokenizer is deterministic (mean only).
        return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1))

    @torch.no_grad()
    def sampling(self, encoder_output, dist_type=None):
        """Return the deterministic (mean, std) pair from the encoder output.

        Note: `dist_type` is accepted for API symmetry with the acoustic
        tokenizer but is ignored; 'none' always falls through to the mean.
        """
        return encoder_output.sample(dist_type='none')

    def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Encode audio to semantic latents; returns (None, latents) since this model has no decoder."""
        encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        sampled_latents, _ = self.sampling(encoder_output, dist_type='none')
        return None, sampled_latents
1187
+
1188
# Make both tokenizer models loadable through transformers.AutoModel.
AutoModel.register(VibeVoiceAcousticTokenizerConfig, VibeVoiceAcousticTokenizerModel)
AutoModel.register(VibeVoiceSemanticTokenizerConfig, VibeVoiceSemanticTokenizerModel)

# Public API of this module.
__all__ = [
    "VibeVoiceTokenizerStreamingCache",
    "VibeVoiceAcousticTokenizerModel",
    "VibeVoiceSemanticTokenizerModel",
]
VibeVoice-finetuning/src/vibevoice/modular/streamer.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import torch
4
+
5
+ import asyncio
6
+ from queue import Queue
7
+ from typing import TYPE_CHECKING, Optional
8
+
9
+
10
+ from transformers.generation import BaseStreamer
11
+
12
+
13
class AudioStreamer(BaseStreamer):
    """
    Audio streamer that stores audio chunks in queues for each sample in the batch.
    This allows streaming audio generation for multiple samples simultaneously.

    Parameters:
        batch_size (`int`):
            The batch size for generation
        stop_signal (`any`, *optional*):
            The signal to put in the queue when generation ends. Defaults to None.
        timeout (`float`, *optional*):
            The timeout for the audio queue. If `None`, the queue will block indefinitely.
    """

    def __init__(
        self,
        batch_size: int,
        stop_signal: Optional[any] = None,
        timeout: Optional[float] = None,
    ):
        self.batch_size = batch_size
        self.stop_signal = stop_signal
        self.timeout = timeout

        # One queue per batch sample, plus per-sample completion flags.
        self.audio_queues = [Queue() for _ in range(batch_size)]
        self.finished_flags = [False] * batch_size
        self.sample_indices_map = {}  # Maps from sample index to queue index

    def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
        """
        Receives audio chunks and puts them in the appropriate queues.

        Args:
            audio_chunks: Tensor of shape (num_samples, ...) containing audio chunks
            sample_indices: Tensor indicating which samples these chunks belong to
        """
        for i, sample_idx in enumerate(sample_indices):
            idx = sample_idx.item()
            if idx < self.batch_size and not self.finished_flags[idx]:
                # Detach and move to CPU before handing the chunk to consumers.
                self.audio_queues[idx].put(audio_chunks[i].detach().cpu(), timeout=self.timeout)

    def end(self, sample_indices: Optional[torch.Tensor] = None):
        """
        Signals the end of generation for specified samples or all samples.

        Args:
            sample_indices: Optional tensor of sample indices to end. If None, ends all.
        """
        if sample_indices is None:
            targets = range(self.batch_size)
        else:
            targets = [s.item() if torch.is_tensor(s) else s for s in sample_indices]

        for idx in targets:
            if idx < self.batch_size and not self.finished_flags[idx]:
                # The stop signal tells consumers this sample is done.
                self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout)
                self.finished_flags[idx] = True

    def __iter__(self):
        """Returns an iterator over the batch of audio streams."""
        return AudioBatchIterator(self)

    def get_stream(self, sample_idx: int):
        """Get the audio stream for a specific sample."""
        if sample_idx >= self.batch_size:
            raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")
        return AudioSampleIterator(self, sample_idx)
87
+
88
+
89
class AudioSampleIterator:
    """Iterator for a single audio stream from the batch."""

    def __init__(self, streamer: AudioStreamer, sample_idx: int):
        self.streamer = streamer
        self.sample_idx = sample_idx

    def __iter__(self):
        return self

    def __next__(self):
        queue = self.streamer.audio_queues[self.sample_idx]
        item = queue.get(timeout=self.streamer.timeout)
        # NOTE(review): `==` against the stop signal assumes chunks support plain
        # equality; a tensor stop signal would make this comparison ambiguous.
        if item == self.streamer.stop_signal:
            raise StopIteration()
        return item
104
+
105
+
106
class AudioBatchIterator:
    """Iterator that yields audio chunks for all samples in the batch.

    Each `__next__` drains whatever chunks are currently available from the
    still-active sample queues and returns them as a dict mapping sample
    index -> chunk. Samples whose queue yields the stop signal are retired.
    Raises StopIteration once every sample has finished.
    """

    def __init__(self, streamer: AudioStreamer):
        self.streamer = streamer
        # Samples that have not yet emitted their stop signal.
        self.active_samples = set(range(streamer.batch_size))

    def __iter__(self):
        return self

    def __next__(self):
        # Local imports keep the module's top-level dependencies unchanged.
        from queue import Empty
        import time

        # Poll in a loop instead of recursing: the previous recursive retry
        # (`return self.__next__()`) could exhaust the stack while waiting on
        # slow producers.
        while True:
            if not self.active_samples:
                raise StopIteration()

            batch_chunks = {}
            samples_to_remove = set()

            # Non-blocking poll of every active sample's queue.
            for idx in self.active_samples:
                try:
                    value = self.streamer.audio_queues[idx].get(block=False)
                except Empty:
                    # Nothing ready for this sample right now; skip it.
                    # (Previously a bare `except:` that silently swallowed
                    # every error, not just an empty queue.)
                    continue
                if value == self.streamer.stop_signal:
                    samples_to_remove.add(idx)
                else:
                    batch_chunks[idx] = value

            # Remove finished samples
            self.active_samples -= samples_to_remove

            if batch_chunks:
                return batch_chunks
            if not self.active_samples:
                raise StopIteration()
            # No chunks ready yet but producers are still active: back off briefly.
            time.sleep(0.01)
148
+
149
+
150
class AsyncAudioStreamer(AudioStreamer):
    """
    Async version of AudioStreamer for use in async contexts.
    """

    def __init__(
        self,
        batch_size: int,
        stop_signal: Optional[any] = None,
        timeout: Optional[float] = None,
    ):
        super().__init__(batch_size, stop_signal, timeout)
        # Replace the thread-safe queues with asyncio queues bound to the
        # currently running loop.
        # NOTE(review): asyncio.get_running_loop() requires construction inside
        # a running event loop — confirm all call sites satisfy this.
        self.audio_queues = [asyncio.Queue() for _ in range(batch_size)]
        self.loop = asyncio.get_running_loop()

    def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
        """Put audio chunks in the appropriate async queues."""
        for i, sample_idx in enumerate(sample_indices):
            idx = sample_idx.item()
            if idx >= self.batch_size or self.finished_flags[idx]:
                continue
            chunk = audio_chunks[i].detach().cpu()
            # The producer may run on another thread; hand off via the loop.
            self.loop.call_soon_threadsafe(self.audio_queues[idx].put_nowait, chunk)

    def end(self, sample_indices: Optional[torch.Tensor] = None):
        """Signal the end of generation for specified samples."""
        if sample_indices is None:
            targets = range(self.batch_size)
        else:
            targets = [s.item() if torch.is_tensor(s) else s for s in sample_indices]

        for idx in targets:
            if idx < self.batch_size and not self.finished_flags[idx]:
                self.loop.call_soon_threadsafe(self.audio_queues[idx].put_nowait, self.stop_signal)
                self.finished_flags[idx] = True

    async def get_stream(self, sample_idx: int):
        """Get async iterator for a specific sample's audio stream."""
        if sample_idx >= self.batch_size:
            raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")

        # Async generator: yield chunks until the stop signal arrives.
        while True:
            item = await self.audio_queues[sample_idx].get()
            if item == self.stop_signal:
                break
            yield item

    def __aiter__(self):
        """Returns an async iterator over all audio streams."""
        return AsyncAudioBatchIterator(self)
+ return AsyncAudioBatchIterator(self)
204
+
205
+
206
class AsyncAudioBatchIterator:
    """Async iterator for batch audio streaming.

    Each `__anext__` races one `Queue.get()` task per active sample and returns
    whatever chunks completed first as a dict mapping sample index -> chunk.
    """

    def __init__(self, streamer: AsyncAudioStreamer):
        self.streamer = streamer
        # Samples that have not yet emitted their stop signal.
        self.active_samples = set(range(streamer.batch_size))

    def __aiter__(self):
        return self

    async def __anext__(self):
        if not self.active_samples:
            raise StopAsyncIteration()

        batch_chunks = {}
        samples_to_remove = set()

        # Create tasks for all active samples
        tasks = {
            idx: asyncio.create_task(self._get_chunk(idx))
            for idx in self.active_samples
        }

        # Wait for at least one chunk to be ready
        done, pending = await asyncio.wait(
            tasks.values(),
            return_when=asyncio.FIRST_COMPLETED,
            timeout=self.streamer.timeout
        )

        # Cancel pending tasks
        # NOTE(review): cancelling a pending Queue.get() that has already been
        # woken can drop an item — confirm chunks are not lost under contention.
        for task in pending:
            task.cancel()

        # Process completed tasks
        for idx, task in tasks.items():
            if task in done:
                try:
                    value = await task
                    if value == self.streamer.stop_signal:
                        samples_to_remove.add(idx)
                    else:
                        batch_chunks[idx] = value
                except asyncio.CancelledError:
                    pass

        self.active_samples -= samples_to_remove

        if batch_chunks:
            return batch_chunks
        elif self.active_samples:
            # Try again if we still have active samples
            return await self.__anext__()
        else:
            raise StopAsyncIteration()

    async def _get_chunk(self, idx):
        """Helper to get a chunk from a specific queue."""
        return await self.streamer.audio_queues[idx].get()
VibeVoice-finetuning/src/vibevoice/processor/__init__.py ADDED
File without changes
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (177 Bytes). View file
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (169 Bytes). View file
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-311.pyc ADDED
Binary file (33.3 kB). View file
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc ADDED
Binary file (29.8 kB). View file
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-311.pyc ADDED
Binary file (21.5 kB). View file
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc ADDED
Binary file (19.4 kB). View file
 
VibeVoice-finetuning/src/vibevoice/processor/preprocessor_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "processor_class": "VibeVoiceProcessor",
3
+ "speech_tok_compress_ratio": 3200,
4
+ "db_normalize": true,
5
+ "audio_processor": {
6
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
7
+ "sampling_rate": 24000,
8
+ "normalize_audio": true,
9
+ "target_dB_FS": -25,
10
+ "eps": 1e-06
11
+ },
12
+ "language_model_pretrained_name": "Qwen/Qwen2.5-7B"
13
+ }
VibeVoice-finetuning/src/vibevoice/processor/vibevoice_processor.py ADDED
@@ -0,0 +1,677 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import warnings
3
+ from typing import List, Optional, Union, Dict, Any, Tuple
4
+ import os
5
+ import re
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
11
+ from transformers.utils import TensorType, logging
12
+ from .vibevoice_tokenizer_processor import AudioNormalizer
13
+
14
+ logger = logging.get_logger(__name__)
15
+
16
+
17
+ class VibeVoiceProcessor:
18
+ r"""
19
+ Constructs a VibeVoice processor which wraps a VibeVoice tokenizer and audio processor into a single processor.
20
+
21
+ [`VibeVoiceProcessor`] offers all the functionalities of [`VibeVoiceTokenizer`] and [`VibeVoiceTokenizerProcessor`].
22
+ See the [`~VibeVoiceProcessor.__call__`] and [`~VibeVoiceProcessor.decode`] for more information.
23
+
24
+ Args:
25
+ tokenizer (`VibeVoiceTextTokenizer` or `VibeVoiceTextTokenizerFast`):
26
+ The tokenizer for text processing.
27
+ audio_processor (`VibeVoiceTokenizerProcessor`):
28
+ The audio processor for speech processing.
29
+ speech_tok_compress_ratio (`int`, *optional*, defaults to 3200):
30
+ The compression ratio for speech tokenization.
31
+ db_normalize (`bool`, *optional*, defaults to True):
32
+ Whether to apply decibel normalization to audio inputs.
33
+ """
34
+
35
+ def __init__(self, tokenizer=None, audio_processor=None, speech_tok_compress_ratio=3200, db_normalize=True, **kwargs):
36
+ self.tokenizer = tokenizer
37
+ self.audio_processor = audio_processor
38
+ self.speech_tok_compress_ratio = speech_tok_compress_ratio
39
+ self.db_normalize = db_normalize
40
+ self.audio_normalizer = AudioNormalizer() if db_normalize else None
41
+ self.system_prompt = " Transform the text provided by various speakers into speech output, utilizing the distinct voice of each respective speaker.\n"
42
+
43
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """
        Instantiate a VibeVoiceProcessor from a pretrained VibeVoice processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:
                - a string, the *model id* of a pretrained model
                - a path to a *directory* containing processor config

        Returns:
            [`VibeVoiceProcessor`]: The processor object instantiated from pretrained model.
        """
        import os
        import json
        from .vibevoice_tokenizer_processor import VibeVoiceTokenizerProcessor
        from vibevoice.modular.modular_vibevoice_text_tokenizer import (
            VibeVoiceTextTokenizer,
            VibeVoiceTextTokenizerFast
        )

        # Load processor configuration.
        # NOTE(review): only a *local* directory is consulted here; a hub model
        # id falls through to the defaults below — confirm that is intended.
        config_path = os.path.join(pretrained_model_name_or_path, "preprocessor_config.json")
        if os.path.exists(config_path):
            with open(config_path, 'r') as f:
                config = json.load(f)
        else:
            logger.warning(f"No preprocessor_config.json found at {pretrained_model_name_or_path}, using defaults")
            config = {
                "speech_tok_compress_ratio": 3200,
                "db_normalize": True,
            }

        # Extract main processor parameters
        speech_tok_compress_ratio = config.get("speech_tok_compress_ratio", 3200)
        db_normalize = config.get("db_normalize", True)

        # Resolve the tokenizer source: config value wins, then an explicit
        # kwarg, then the Qwen2.5-1.5B default.
        language_model_pretrained_name = config.get("language_model_pretrained_name", None) or kwargs.pop("language_model_pretrained_name", "Qwen/Qwen2.5-1.5B")
        logger.info(f"Loading tokenizer from {language_model_pretrained_name}")
        if 'qwen' in language_model_pretrained_name.lower():
            tokenizer = VibeVoiceTextTokenizerFast.from_pretrained(
                language_model_pretrained_name,
                **kwargs
            )
        else:
            # NOTE(review): despite the message, only Qwen tokenizers are wired
            # up in this branch; Llama/Gemma support does not exist here.
            raise ValueError(f"Unsupported tokenizer type for {language_model_pretrained_name}. Supported types: Qwen, Llama, Gemma.")

        # Load audio processor: build from the saved sub-config when present,
        # otherwise fall back to the defaults of VibeVoiceTokenizerProcessor.
        if "audio_processor" in config:
            # Create audio processor from config
            audio_config = config["audio_processor"]
            audio_processor = VibeVoiceTokenizerProcessor(
                sampling_rate=audio_config.get("sampling_rate", 24000),
                normalize_audio=audio_config.get("normalize_audio", True),
                target_dB_FS=audio_config.get("target_dB_FS", -25),
                eps=audio_config.get("eps", 1e-6),
            )
        else:
            # Create default audio processor
            audio_processor = VibeVoiceTokenizerProcessor()

        # Create and return the processor
        return cls(
            tokenizer=tokenizer,
            audio_processor=audio_processor,
            speech_tok_compress_ratio=speech_tok_compress_ratio,
            db_normalize=db_normalize,
        )
113
+
114
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
115
+ """
116
+ Save a processor to a directory, so that it can be re-loaded using the
117
+ [`~VibeVoiceProcessor.from_pretrained`] class method.
118
+
119
+ Args:
120
+ save_directory (`str` or `os.PathLike`):
121
+ Directory where the processor will be saved.
122
+ """
123
+ import os
124
+ import json
125
+
126
+ os.makedirs(save_directory, exist_ok=True)
127
+
128
+ # Save processor configuration
129
+ processor_config = {
130
+ "processor_class": "VibeVoiceProcessor",
131
+ "speech_tok_compress_ratio": self.speech_tok_compress_ratio,
132
+ "db_normalize": self.db_normalize,
133
+ "audio_processor": {
134
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
135
+ "sampling_rate": getattr(self.audio_processor, 'sampling_rate', 24000),
136
+ "normalize_audio": getattr(self.audio_processor, 'normalize_audio', True),
137
+ "target_dB_FS": getattr(self.audio_processor, 'target_dB_FS', -25),
138
+ "eps": getattr(self.audio_processor, 'eps', 1e-6),
139
+ }
140
+ }
141
+
142
+ config_path = os.path.join(save_directory, "preprocessor_config.json")
143
+ with open(config_path, 'w') as f:
144
+ json.dump(processor_config, f, indent=2)
145
+
146
+ logger.info(f"Processor configuration saved in {config_path}")
147
+
148
    def __call__(
        self,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        voice_samples: Optional[Union[List[Union[str, np.ndarray]], List[List[Union[str, np.ndarray]]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = True,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to process one or more podcast scripts with optional voice samples.

        Args:
            text (`str`, `List[str]`):
                The input text(s) to process. Can be:
                - A single script string
                - A list of script strings for batch processing
                - A path to a .json or .txt file
                - A list of paths
            voice_samples (`List[Union[str, np.ndarray]]`, `List[List[Union[str, np.ndarray]]]`, *optional*):
                Voice samples for each script. Can be:
                - A list of samples for a single script
                - A list of lists for batch processing
            padding (`bool`, `str` or `PaddingStrategy`, defaults to `True`):
                Whether to pad sequences to the same length
            truncation (`bool`, `str` or `TruncationStrategy`, defaults to `False`):
                Whether to truncate sequences
            max_length (`int`, *optional*):
                Maximum length of the returned sequences
            return_tensors (`str` or `TensorType`, *optional*):
                If set, will return tensors of a particular framework
            return_attention_mask (`bool`, defaults to `True`):
                Whether to return the attention mask

        Returns:
            `BatchEncoding`: A BatchEncoding with the following fields:
            - **input_ids** -- List of token id sequences or tensor
            - **attention_mask** -- List of attention masks or tensor
            - **speech_tensors** -- Padded speech inputs (if voice_samples provided)
            - **speech_masks** -- Speech masks (if voice_samples provided)
            - **speech_input_mask** -- Boolean masks indicating speech token positions
        """
        # Handle single vs batch input.
        # NOTE(review): a non-empty list whose first element is NOT a str
        # (i.e. pre-tokenized input) is treated as a SINGLE input here, and a
        # list of str as a batch — confirm this matches caller expectations.
        if isinstance(text, str) or (isinstance(text, list) and len(text) > 0 and not isinstance(text[0], str)):
            # Single input
            texts = [text]
            is_batched = False
        else:
            # Batch input
            texts = text
            is_batched = True

        # Handle voice samples: mirror the single/batch decision made above so
        # each script is paired with one list of voice samples.
        if voice_samples is not None:
            if not is_batched or (isinstance(voice_samples[0], (str, np.ndarray))):
                # Single set of voice samples
                voice_samples_list = [voice_samples]
            else:
                # Batch of voice samples
                voice_samples_list = voice_samples
        else:
            voice_samples_list = [None] * len(texts)

        # Process each input independently, then collate.
        all_encodings = []
        for text_input, voice_input in zip(texts, voice_samples_list):
            encoding = self._process_single(text_input, voice_input)
            all_encodings.append(encoding)

        # Combine batch
        batch_encoding = self._batch_encode(
            all_encodings,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            return_attention_mask=return_attention_mask,
        )

        return batch_encoding
230
+
231
+ def _process_single(
232
+ self,
233
+ text: Union[str, TextInput],
234
+ voice_samples: Optional[List[Union[str, np.ndarray]]] = None,
235
+ ) -> Dict[str, Any]:
236
+ """Process a single podcast script."""
237
+ # Determine if text is a file path or direct script
238
+ script = None
239
+ if isinstance(text, str):
240
+ # Check if it's a file path
241
+ if text.endswith('.json') and os.path.exists(text):
242
+ script = self._convert_json_to_script(text)
243
+ elif text.endswith('.txt') and os.path.exists(text):
244
+ script = self._convert_text_to_script(text)
245
+ else:
246
+ # Assume it's the script content directly
247
+ script = text
248
+
249
+ if script is None:
250
+ raise ValueError(f"Could not process input text: {text}")
251
+
252
+ # Parse the script
253
+ parsed_lines = self._parse_script(script)
254
+ all_speakers = list(set(speaker_id for speaker_id, _ in parsed_lines))
255
+
256
+ # Create system prompt
257
+ # system_tokens = self.tokenizer.encode(self.system_prompt, add_special_tokens=False)
258
+ system_tokens = self.tokenizer.encode(self.system_prompt)
259
+
260
+ # Process voice samples if provided
261
+ if voice_samples:
262
+ voice_tokens, voice_speech_inputs, voice_speech_masks = self._create_voice_prompt(voice_samples[:len(all_speakers)])
263
+ else:
264
+ voice_tokens, voice_speech_inputs, voice_speech_masks = [], [], []
265
+
266
+ # Build full token sequence
267
+ full_tokens = system_tokens + voice_tokens
268
+ speech_input_mask = [False] * len(system_tokens) + voice_speech_masks
269
+
270
+ # Add text input section
271
+ full_tokens += self.tokenizer.encode(' Text input:\n', add_special_tokens=False)
272
+ speech_input_mask += [False] * len(self.tokenizer.encode(' Text input:\n', add_special_tokens=False))
273
+
274
+ for speaker_id, speaker_text in parsed_lines:
275
+ speaker_text_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:{speaker_text}\n", add_special_tokens=False)
276
+ full_tokens += speaker_text_tokens
277
+ speech_input_mask += [False] * len(speaker_text_tokens)
278
+
279
+ # Add speech output section
280
+ full_tokens += self.tokenizer.encode(' Speech output:\n', add_special_tokens=False) + [self.tokenizer.speech_start_id]
281
+ speech_input_mask += [False] * (len(self.tokenizer.encode(' Speech output:\n', add_special_tokens=False)) + 1)
282
+
283
+ return {
284
+ "input_ids": full_tokens,
285
+ "speech_inputs": voice_speech_inputs if voice_speech_inputs else None,
286
+ "speech_input_mask": speech_input_mask,
287
+ "parsed_script": parsed_lines,
288
+ "all_speakers": all_speakers,
289
+ }
290
+
291
    def _batch_encode(
        self,
        encodings: List[Dict[str, Any]],
        padding: Union[bool, str, PaddingStrategy] = True,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: bool = True,
    ) -> BatchEncoding:
        """Combine per-sample encodings into a batch with LEFT padding.

        Pad tokens, zero attention, and False speech-mask entries are
        prepended so that generation-style models see real tokens at the end.
        NOTE(review): truncation is only applied when padding is enabled; with
        `padding=False` the `truncation`/`max_length` arguments are ignored.
        """
        # Extract input_ids and the matching speech-position masks.
        input_ids_list = [enc["input_ids"] for enc in encodings]
        speech_input_masks_list = [enc["speech_input_mask"] for enc in encodings]

        # Determine padding strategy
        if isinstance(padding, bool):
            padding_strategy = PaddingStrategy.LONGEST if padding else PaddingStrategy.DO_NOT_PAD
        elif isinstance(padding, str):
            padding_strategy = PaddingStrategy(padding)
        else:
            padding_strategy = padding

        # Apply padding to input_ids
        if padding_strategy != PaddingStrategy.DO_NOT_PAD:
            if padding_strategy == PaddingStrategy.LONGEST:
                max_len = max(len(ids) for ids in input_ids_list)
            elif padding_strategy == PaddingStrategy.MAX_LENGTH and max_length is not None:
                max_len = max_length
            else:
                # Fallback: behave like LONGEST for any other strategy.
                max_len = max(len(ids) for ids in input_ids_list)

            # Pad sequences
            padded_input_ids = []
            attention_masks = []
            padded_speech_input_masks = []

            for input_ids, speech_mask in zip(input_ids_list, speech_input_masks_list):
                # Truncate if needed (keeps the sequence head).
                if truncation and len(input_ids) > max_len:
                    input_ids = input_ids[:max_len]
                    speech_mask = speech_mask[:max_len]

                # Left-pad: pad ids first, then the real tokens.
                padding_length = max_len - len(input_ids)
                # padded_ids = [self.tokenizer.pad_token_id] * padding_length + input_ids
                padded_ids = [self.tokenizer.pad_id] * padding_length + input_ids
                attention_mask = [0] * padding_length + [1] * len(input_ids)
                padded_speech_mask = [False] * padding_length + speech_mask

                padded_input_ids.append(padded_ids)
                attention_masks.append(attention_mask)
                padded_speech_input_masks.append(padded_speech_mask)

            input_ids_list = padded_input_ids
            speech_input_masks_list = padded_speech_input_masks
        else:
            # No padding, just create attention masks
            attention_masks = [[1] * len(ids) for ids in input_ids_list] if return_attention_mask else None

        # Flatten speech inputs across the batch (order matches the order in
        # which speech-token spans appear in the concatenated sequences).
        all_speech_inputs = []
        has_speech = False
        for enc in encodings:
            if enc["speech_inputs"] is not None:
                all_speech_inputs.extend(enc["speech_inputs"])
                has_speech = True

        # Prepare batch encoding
        batch_encoding = BatchEncoding()

        # Handle tensor conversion
        if return_tensors is not None:
            batch_encoding["input_ids"] = torch.tensor(input_ids_list, dtype=torch.long)
            if return_attention_mask and attention_masks is not None:
                batch_encoding["attention_mask"] = torch.tensor(attention_masks, dtype=torch.long)
            batch_encoding["speech_input_mask"] = torch.tensor(speech_input_masks_list, dtype=torch.bool)
        else:
            batch_encoding["input_ids"] = input_ids_list
            if return_attention_mask and attention_masks is not None:
                batch_encoding["attention_mask"] = attention_masks
            batch_encoding["speech_input_mask"] = speech_input_masks_list

        # Process speech tensors if present
        if has_speech:
            speech_dict = self.prepare_speech_inputs(
                all_speech_inputs,
                return_tensors=return_tensors,
            )
            batch_encoding["speech_tensors"] = speech_dict["padded_speeches"]
            batch_encoding["speech_masks"] = speech_dict["speech_masks"]
        else:
            batch_encoding["speech_tensors"] = None
            batch_encoding["speech_masks"] = None

        # Add per-sample metadata for downstream consumers.
        batch_encoding["parsed_scripts"] = [enc["parsed_script"] for enc in encodings]
        batch_encoding["all_speakers_list"] = [enc["all_speakers"] for enc in encodings]

        return batch_encoding
390
+
391
    def _create_voice_prompt(
        self,
        speaker_samples: List[Union[str, np.ndarray]]
    ) -> Tuple[List[int], List[np.ndarray], List[bool]]:
        """
        Create voice prompt tokens and process audio samples.

        Each sample becomes: " Speaker N:" + speech_start + K diffusion-token
        placeholders + speech_end + newline, where
        K = ceil(num_samples / speech_tok_compress_ratio). The boolean mask is
        True exactly at the K placeholder positions.

        Returns:
            tuple: (voice_tokens, voice_speech_inputs, voice_speech_masks)
        """
        vae_token_id = self.tokenizer.speech_diffusion_id

        voice_full_tokens = self.tokenizer.encode(' Voice input:\n', add_special_tokens=False)
        voice_speech_inputs = []
        voice_speech_masks = [False] * len(voice_full_tokens)

        for speaker_id, speaker_audio in enumerate(speaker_samples):
            prefix_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:", add_special_tokens=False)

            # Process audio
            if isinstance(speaker_audio, str):
                # Load audio from file
                wav = self.audio_processor._load_audio_from_path(speaker_audio)
            else:
                wav = np.array(speaker_audio, dtype=np.float32)

            # Apply normalization if needed
            if self.db_normalize and self.audio_normalizer:
                wav = self.audio_normalizer(wav)

            # Calculate token length based on compression ratio
            # if speaker_audio.endswith('.pt') or speaker_audio.endswith('.npy'):
            #     vae_tok_len = wav.shape[0]
            # else:
            vae_tok_len = math.ceil(wav.shape[0] / self.speech_tok_compress_ratio)

            # Build tokens and masks
            speaker_tokens = (prefix_tokens +
                            [self.tokenizer.speech_start_id] +
                            [vae_token_id] * vae_tok_len +
                            [self.tokenizer.speech_end_id] +
                            self.tokenizer.encode('\n', add_special_tokens=False))

            # NOTE(review): the final single [False] assumes '\n' encodes to
            # exactly one token; if it ever encodes to more, the mask would be
            # shorter than speaker_tokens — confirm for the tokenizer in use.
            vae_input_mask = ([False] * len(prefix_tokens) +
                             [False] +
                             [True] * vae_tok_len +
                             [False] +
                             [False])

            voice_full_tokens.extend(speaker_tokens)
            voice_speech_masks.extend(vae_input_mask)
            voice_speech_inputs.append(wav)

        return voice_full_tokens, voice_speech_inputs, voice_speech_masks
445
+
446
+ def prepare_speech_inputs(
447
+ self,
448
+ speech_inputs: List[np.ndarray],
449
+ return_tensors: Optional[Union[str, TensorType]] = None,
450
+ device: Optional[Union[str, torch.device]] = None,
451
+ dtype: Optional[torch.dtype] = None,
452
+ ) -> Dict[str, Any]:
453
+ """
454
+ Prepare speech inputs for model consumption.
455
+
456
+ Args:
457
+ speech_inputs: List of speech arrays
458
+ return_tensors: Output tensor type
459
+ device: Device to place tensors on
460
+ dtype: Data type for tensors
461
+
462
+ Returns:
463
+ Dictionary with padded_speeches and speech_masks
464
+ """
465
+ if not speech_inputs:
466
+ return {"padded_speeches": None, "speech_masks": None}
467
+
468
+ # Calculate sequence lengths
469
+ vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) for s in speech_inputs]
470
+ # vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) if s.ndim == 1 else s.shape[0] for s in speech_inputs]
471
+ max_speech_length = max(s.shape[0] for s in speech_inputs)
472
+
473
+ # Pad speeches
474
+ if speech_inputs[0].ndim == 1:
475
+ padded_speeches = np.full((len(speech_inputs), max_speech_length), fill_value=0, dtype=np.float32)
476
+ else:
477
+ padded_speeches = np.full((len(speech_inputs), max_speech_length, speech_inputs[0].shape[-1]), fill_value=0, dtype=np.float32)
478
+ speech_masks = np.zeros((len(speech_inputs), max(vae_tok_seqlens)), dtype=np.bool_)
479
+
480
+ for i, (speech, vae_tok_length) in enumerate(zip(speech_inputs, vae_tok_seqlens)):
481
+ padded_speeches[i, :len(speech)] = speech
482
+ speech_masks[i, :vae_tok_length] = True
483
+
484
+ result = {
485
+ "padded_speeches": padded_speeches,
486
+ "speech_masks": speech_masks,
487
+ }
488
+
489
+ # Convert to tensors if requested
490
+ if return_tensors == "pt":
491
+ result["padded_speeches"] = torch.tensor(padded_speeches, device=device, dtype=dtype or torch.float32)
492
+ result["speech_masks"] = torch.tensor(speech_masks, device=device, dtype=torch.bool)
493
+
494
+ return result
495
+
496
+ def _convert_json_to_script(self, json_file: str) -> str:
497
+ """
498
+ Convert JSON format to script format.
499
+ Expected JSON format:
500
+ [
501
+ {"speaker": "1", "text": "Hello everyone..."},
502
+ {"speaker": "2", "text": "Great to be here..."}
503
+ ]
504
+ """
505
+ import json
506
+
507
+ with open(json_file, 'r', encoding='utf-8') as f:
508
+ data = json.load(f)
509
+
510
+ if not isinstance(data, list):
511
+ raise ValueError("JSON file must contain a list of speaker entries")
512
+
513
+ script_lines = []
514
+ for item in data:
515
+ if not isinstance(item, dict):
516
+ logger.warning(f"Skipping non-dict entry: {item}")
517
+ continue
518
+
519
+ speaker = item.get('speaker')
520
+ text = item.get('text')
521
+
522
+ if speaker is None or text is None:
523
+ logger.warning(f"Skipping entry missing speaker or text: {item}")
524
+ continue
525
+
526
+ # Ensure speaker ID is valid
527
+ try:
528
+ speaker_id = int(speaker)
529
+ except (ValueError, TypeError):
530
+ logger.warning(f"Invalid speaker ID: {speaker}, skipping entry")
531
+ continue
532
+
533
+ # Clean up text
534
+ text = text.strip()
535
+ if text:
536
+ script_lines.append(f"Speaker {speaker_id}: {text}")
537
+
538
+ if not script_lines:
539
+ raise ValueError("No valid entries found in JSON file")
540
+
541
+ return "\n".join(script_lines)
542
+
543
+ def _convert_text_to_script(self, text_file: str) -> str:
544
+ """
545
+ Convert text file to script format.
546
+ Handles multiple formats:
547
+ 1. Already formatted as "Speaker X: text"
548
+ 2. Plain text (assigns to Speaker 1)
549
+
550
+ Handles edge cases like multiple colons in a line.
551
+ """
552
+ with open(text_file, 'r', encoding='utf-8') as f:
553
+ lines = f.readlines()
554
+
555
+ script_lines = []
556
+ current_speaker = 1
557
+
558
+ for line in lines:
559
+ line = line.strip()
560
+ if not line:
561
+ continue
562
+
563
+ # Try to parse as "Speaker X: text" format
564
+ # Use regex to be more robust
565
+ speaker_match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line, re.IGNORECASE)
566
+
567
+ if speaker_match:
568
+ speaker_id = int(speaker_match.group(1))
569
+ text = speaker_match.group(2).strip()
570
+ if text:
571
+ script_lines.append(f"Speaker {speaker_id}: {text}")
572
+ else:
573
+ # Treat as plain text - assign to current speaker
574
+ script_lines.append(f"Speaker {current_speaker}: {line}")
575
+
576
+ if not script_lines:
577
+ raise ValueError("No valid content found in text file")
578
+
579
+ return "\n".join(script_lines)
580
+
581
    def _parse_script(self, script: str) -> List[Tuple[int, str]]:
        """Parse script into list of (speaker_id, text) tuples.

        Text values are returned with a single leading space (matches the
        prompt format built in `_process_single`). Unparseable lines are
        skipped with a warning.
        """
        lines = script.strip().split("\n")
        parsed_lines = []
        speaker_ids = []

        # First pass: parse all lines and collect speaker IDs
        for line in lines:
            if not line.strip():
                continue

            # Use regex to handle edge cases like multiple colons
            match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line.strip(), re.IGNORECASE)

            if match:
                speaker_id = int(match.group(1))
                text = ' ' + match.group(2).strip()
                parsed_lines.append((speaker_id, text))
                speaker_ids.append(speaker_id)
            else:
                logger.warning(f"Could not parse line: '{line}'")

        if not parsed_lines:
            raise ValueError("No valid speaker lines found in script")

        # Check if we need to normalize speaker IDs (only if all are > 0)
        # NOTE(review): this subtracts exactly 1 (assuming 1-based numbering),
        # not min_speaker_id — a script whose lowest speaker is 2 still does
        # NOT end up 0-based. Confirm whether that is intended.
        min_speaker_id = min(speaker_ids)
        if min_speaker_id > 0:
            # Normalize to start from 0
            normalized_lines = []
            for speaker_id, text in parsed_lines:
                normalized_lines.append((speaker_id - 1, text))
            return normalized_lines
        else:
            # Keep original IDs
            return parsed_lines
617
+
618
+ def _merge_inputs(self, text_inputs: BatchEncoding, audio_inputs: Dict) -> BatchEncoding:
619
+ """Merge text and audio inputs into a single BatchEncoding."""
620
+ # Start with text inputs
621
+ merged = BatchEncoding(text_inputs)
622
+
623
+ # Add audio-specific fields
624
+ if "audio" in audio_inputs:
625
+ merged["speech_inputs"] = audio_inputs["audio"]
626
+ if "streaming" in audio_inputs:
627
+ merged["streaming"] = audio_inputs["streaming"]
628
+
629
+ return merged
630
+
631
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to VibeVoiceTextTokenizer's
        [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring
        of that method for more information.
        """
        # Pure delegation — no processor-specific post-processing is applied.
        return self.tokenizer.batch_decode(*args, **kwargs)
637
+
638
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to VibeVoiceTextTokenizer's
        [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of
        that method for more information.
        """
        # Pure delegation — no processor-specific post-processing is applied.
        return self.tokenizer.decode(*args, **kwargs)
644
+
645
+ @property
646
+ def model_input_names(self):
647
+ """
648
+ Return the list of inputs accepted by the model.
649
+ """
650
+ tokenizer_input_names = self.tokenizer.model_input_names
651
+ audio_processor_input_names = self.audio_processor.model_input_names
652
+ return list(dict.fromkeys(tokenizer_input_names + audio_processor_input_names + ["speech_inputs", "speech_input_mask"]))
653
+
654
    def save_audio(self,
        audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]],
        output_path: str = "output.wav",
        sampling_rate: Optional[int] = None,
        normalize: bool = False,
        batch_prefix: str = "audio_",
    ) -> str:
        """
        Save audio data to a file (thin wrapper around the audio processor).

        Args:
            audio (Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]]):
                The audio data to save. Can be a single tensor/array or a list of them.
            output_path (str, optional): Path to save the audio file. Defaults to "output.wav".
            sampling_rate (int, optional): Sampling rate for the audio. If None, uses the processor's default.
            normalize (bool, optional): Whether to normalize the audio before saving. Defaults to False.
            batch_prefix (str, optional): Prefix for batch audio files. Defaults to "audio_".

        Returns:
            str: The path to the saved audio file.
        """
        # All format handling lives in the audio processor.
        return self.audio_processor.save_audio(audio, output_path=output_path, sampling_rate=sampling_rate, normalize=normalize, batch_prefix=batch_prefix)
674
+
675
# Public API of this module.
__all__ = [
    "VibeVoiceProcessor",
]
VibeVoice-finetuning/src/vibevoice/processor/vibevoice_tokenizer_processor.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Processor class for VibeVoice models.
3
+ """
4
+
5
+ import os
6
+ import json
7
+ import warnings
8
+ from typing import List, Optional, Union, Dict, Any
9
+
10
+ import numpy as np
11
+ import torch
12
+
13
+ from transformers.feature_extraction_utils import FeatureExtractionMixin
14
+ from transformers.utils import logging
15
+
16
+ logger = logging.get_logger(__name__)
17
+
18
+
19
class AudioNormalizer:
    """
    Loudness normalization for VibeVoice tokenizer inputs.

    Brings audio to a target dB-FS RMS level, then rescales once more if the
    result would clip outside [-1, 1], so levels are consistent without
    distorting the waveform.
    """

    def __init__(self, target_dB_FS: float = -25, eps: float = 1e-6):
        """
        Initialize the audio normalizer.

        Args:
            target_dB_FS (float): Target dB FS level for the audio. Default: -25
            eps (float): Small value to avoid division by zero. Default: 1e-6
        """
        self.target_dB_FS = target_dB_FS
        self.eps = eps

    def tailor_dB_FS(self, audio: np.ndarray) -> tuple:
        """
        Scale `audio` so its RMS sits at the target dB FS level.

        Args:
            audio (np.ndarray): Input audio signal

        Returns:
            tuple: (normalized_audio, rms, scalar) — the scaled signal, the
            original RMS, and the gain that was applied.
        """
        rms = np.sqrt(np.mean(np.square(audio)))
        gain = 10 ** (self.target_dB_FS / 20) / (rms + self.eps)
        return audio * gain, rms, gain

    def avoid_clipping(self, audio: np.ndarray, scalar: Optional[float] = None) -> tuple:
        """
        Divide by `scalar` — or by the peak when it exceeds 1.0 — to keep
        samples inside [-1, 1].

        Args:
            audio (np.ndarray): Input audio signal
            scalar (float, optional): Explicit scaling factor

        Returns:
            tuple: (normalized_audio, scalar) — the scaled signal and the
            divisor that was used (1.0 when no attenuation was needed).
        """
        if scalar is None:
            peak = np.max(np.abs(audio))
            scalar = peak + self.eps if peak > 1.0 else 1.0
        return audio / scalar, scalar

    def __call__(self, audio: np.ndarray) -> np.ndarray:
        """
        Normalize the audio: level adjustment first, clip protection second.

        Args:
            audio (np.ndarray): Input audio signal

        Returns:
            np.ndarray: Normalized audio signal
        """
        leveled, _, _ = self.tailor_dB_FS(audio)
        safe, _ = self.avoid_clipping(leveled)
        return safe
88
+
89
+
90
# Change from ProcessorMixin to FeatureExtractionMixin which is designed for single components
class VibeVoiceTokenizerProcessor(FeatureExtractionMixin):
    """
    Processor for VibeVoice acoustic tokenizer models.

    This processor handles audio preprocessing for VibeVoice models, including:
    - Audio format conversion (stereo to mono)
    - Optional audio normalization
    - Streaming support for infinite-length audio

    Args:
        sampling_rate (int, optional): Expected sampling rate. Defaults to 24000.
        normalize_audio (bool, optional): Whether to normalize audio. Defaults to True.
        target_dB_FS (float, optional): Target dB FS for normalization. Defaults to -25.
        eps (float, optional): Small value for numerical stability. Defaults to 1e-6.
    """
    # NOTE(review): __call__ actually returns its output under the key
    # "audio", not "input_features" — confirm which name downstream code
    # relies on before changing either side.
    model_input_names = ["input_features"]
107
+
108
+ def __init__(
109
+ self,
110
+ sampling_rate: int = 24000,
111
+ normalize_audio: bool = True,
112
+ target_dB_FS: float = -25,
113
+ eps: float = 1e-6,
114
+ **kwargs,
115
+ ):
116
+ super().__init__(**kwargs)
117
+
118
+ self.sampling_rate = sampling_rate
119
+ self.normalize_audio = normalize_audio
120
+
121
+ # Initialize audio normalizer if needed
122
+ if self.normalize_audio:
123
+ self.normalizer = AudioNormalizer(target_dB_FS=target_dB_FS, eps=eps)
124
+ else:
125
+ self.normalizer = None
126
+
127
+ # Save config
128
+ self.feature_extractor_dict = {
129
+ "sampling_rate": sampling_rate,
130
+ "normalize_audio": normalize_audio,
131
+ "target_dB_FS": target_dB_FS,
132
+ "eps": eps,
133
+ }
134
+
135
+ def _ensure_mono(self, audio: np.ndarray) -> np.ndarray:
136
+ """
137
+ Convert stereo audio to mono if needed.
138
+
139
+ Args:
140
+ audio (np.ndarray): Input audio array
141
+
142
+ Returns:
143
+ np.ndarray: Mono audio array
144
+ """
145
+ if len(audio.shape) == 1:
146
+ return audio
147
+ elif len(audio.shape) == 2:
148
+ if audio.shape[0] == 2: # (2, time)
149
+ return np.mean(audio, axis=0)
150
+ elif audio.shape[1] == 2: # (time, 2)
151
+ return np.mean(audio, axis=1)
152
+ else:
153
+ # If one dimension is 1, squeeze it
154
+ if audio.shape[0] == 1:
155
+ return audio.squeeze(0)
156
+ elif audio.shape[1] == 1:
157
+ return audio.squeeze(1)
158
+ else:
159
+ raise ValueError(f"Unexpected audio shape: {audio.shape}")
160
+ else:
161
+ raise ValueError(f"Audio should be 1D or 2D, got shape: {audio.shape}")
162
+
163
+ def _process_single_audio(self, audio: Union[np.ndarray, List[float]]) -> np.ndarray:
164
+ """
165
+ Process a single audio array.
166
+
167
+ Args:
168
+ audio: Single audio input
169
+
170
+ Returns:
171
+ np.ndarray: Processed audio
172
+ """
173
+ # Convert to numpy array
174
+ if not isinstance(audio, np.ndarray):
175
+ audio = np.array(audio, dtype=np.float32)
176
+ else:
177
+ audio = audio.astype(np.float32)
178
+
179
+ # Ensure mono
180
+ audio = self._ensure_mono(audio)
181
+
182
+ # Normalize if requested
183
+ if self.normalize_audio and self.normalizer is not None:
184
+ audio = self.normalizer(audio)
185
+
186
+ return audio
187
+
188
+ def __call__(
189
+ self,
190
+ audio: Union[str, np.ndarray, List[float], List[np.ndarray], List[List[float]], List[str]] = None,
191
+ sampling_rate: Optional[int] = None,
192
+ return_tensors: Optional[str] = None,
193
+ **kwargs,
194
+ ):
195
+ """
196
+ Process audio for VibeVoice models.
197
+
198
+ Args:
199
+ audio: Audio input(s) to process. Can be:
200
+ - str: Path to audio file
201
+ - np.ndarray: Audio array
202
+ - List[float]: Audio as list of floats
203
+ - List[np.ndarray]: Batch of audio arrays
204
+ - List[str]: Batch of audio file paths
205
+ sampling_rate (int, optional): Sampling rate of the input audio
206
+ return_tensors (str, optional): Return format ('pt' for PyTorch, 'np' for NumPy)
207
+
208
+ Returns:
209
+ dict: Processed audio inputs with keys:
210
+ - input_features: Audio tensor(s) ready for the model
211
+ """
212
+ if audio is None:
213
+ raise ValueError("Audio input is required")
214
+
215
+ # Validate sampling rate
216
+ if sampling_rate is not None and sampling_rate != self.sampling_rate:
217
+ logger.warning(
218
+ f"Input sampling rate ({sampling_rate}) differs from expected "
219
+ f"sampling rate ({self.sampling_rate}). Please resample your audio."
220
+ )
221
+
222
+ # Handle different input types
223
+ if isinstance(audio, str):
224
+ # Single audio file path
225
+ audio = self._load_audio_from_path(audio)
226
+ is_batched = False
227
+ elif isinstance(audio, list):
228
+ if len(audio) == 0:
229
+ raise ValueError("Empty audio list provided")
230
+
231
+ # Check if it's a list of file paths
232
+ if all(isinstance(item, str) for item in audio):
233
+ # Batch of audio file paths
234
+ audio = [self._load_audio_from_path(path) for path in audio]
235
+ is_batched = True
236
+ else:
237
+ # Check if it's batched audio arrays
238
+ is_batched = isinstance(audio[0], (np.ndarray, list))
239
+ else:
240
+ # Single audio array or list
241
+ is_batched = False
242
+
243
+ # Process audio
244
+ if is_batched:
245
+ processed_audio = [self._process_single_audio(a) for a in audio]
246
+ else:
247
+ processed_audio = [self._process_single_audio(audio)]
248
+
249
+ # Convert to tensors if requested
250
+ if return_tensors == "pt":
251
+ if len(processed_audio) == 1:
252
+ # Create a proper batch dimension (B, T)
253
+ input_features = torch.from_numpy(processed_audio[0]).unsqueeze(0).unsqueeze(1)
254
+ else:
255
+ # For batched input with different lengths, create a batch properly
256
+ input_features = torch.stack([torch.from_numpy(a) for a in processed_audio]).unsqueeze(1)
257
+ elif return_tensors == "np":
258
+ if len(processed_audio) == 1:
259
+ input_features = processed_audio[0][np.newaxis, np.newaxis, :]
260
+ else:
261
+ input_features = np.stack(processed_audio)[:, np.newaxis, :]
262
+ else:
263
+ input_features = processed_audio[0] if len(processed_audio) == 1 else processed_audio
264
+
265
+ outputs = {
266
+ "audio": input_features, # Use "audio" instead of "input_features"
267
+ }
268
+
269
+ return outputs
270
+
271
+ def _load_audio_from_path(self, audio_path: str) -> np.ndarray:
272
+ """
273
+ Load audio from file path.
274
+
275
+ Args:
276
+ audio_path (str): Path to audio file
277
+
278
+ Returns:
279
+ np.ndarray: Loaded audio array
280
+ """
281
+ # Get file extension to determine loading method
282
+ file_ext = os.path.splitext(audio_path)[1].lower()
283
+
284
+ if file_ext in ['.wav', '.mp3', '.flac', '.m4a', '.ogg']:
285
+ # Audio file - use librosa
286
+ import librosa
287
+ audio_array, sr = librosa.load(
288
+ audio_path,
289
+ sr=self.sampling_rate,
290
+ mono=True
291
+ )
292
+ return audio_array
293
+ elif file_ext == '.pt':
294
+ # PyTorch tensor file
295
+ audio_tensor = torch.load(audio_path, map_location='cpu').squeeze()
296
+ if isinstance(audio_tensor, torch.Tensor):
297
+ audio_array = audio_tensor.numpy()
298
+ else:
299
+ audio_array = np.array(audio_tensor)
300
+ return audio_array.astype(np.float32)
301
+ elif file_ext == '.npy':
302
+ # NumPy file
303
+ audio_array = np.load(audio_path)
304
+ return audio_array.astype(np.float32)
305
+ else:
306
+ raise ValueError(
307
+ f"Unsupported file format: {file_ext}. "
308
+ f"Supported formats: .wav, .mp3, .flac, .m4a, .ogg, .pt, .npy, .npz"
309
+ )
310
+
311
+ def preprocess_audio(
312
+ self,
313
+ audio_path_or_array: Union[str, np.ndarray],
314
+ normalize: Optional[bool] = None,
315
+ ) -> np.ndarray:
316
+ """
317
+ Convenience method to preprocess audio from file path or array.
318
+ This method is kept for backward compatibility but __call__ is recommended.
319
+
320
+ Args:
321
+ audio_path_or_array: Path to audio file or numpy array
322
+ normalize: Whether to normalize (overrides default setting)
323
+
324
+ Returns:
325
+ np.ndarray: Preprocessed audio array
326
+ """
327
+ if isinstance(audio_path_or_array, str):
328
+ audio_array = self._load_audio_from_path(audio_path_or_array)
329
+ else:
330
+ audio_array = np.array(audio_path_or_array, dtype=np.float32)
331
+
332
+ # Override normalization setting if specified
333
+ original_normalize = self.normalize_audio
334
+ if normalize is not None:
335
+ self.normalize_audio = normalize
336
+
337
+ try:
338
+ processed = self._process_single_audio(audio_array)
339
+ finally:
340
+ # Restore original setting
341
+ self.normalize_audio = original_normalize
342
+
343
+ return processed
344
+
345
    # Override to_dict method for configuration saving
    def to_dict(self) -> Dict[str, Any]:
        """
        Convert the object to a dict containing all attributes needed for serialization.
        """
        # Returns the dict stored by __init__ itself (not a copy); callers
        # should not mutate it.
        return self.feature_extractor_dict
351
+
352
+ def save_audio(
353
+ self,
354
+ audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]],
355
+ output_path: str = "output.wav",
356
+ sampling_rate: Optional[int] = None,
357
+ normalize: bool = False,
358
+ batch_prefix: str = "audio_",
359
+ ):
360
+ """
361
+ Save audio data to WAV file(s).
362
+
363
+ Args:
364
+ audio: Audio data to save. Can be:
365
+ - torch.Tensor: PyTorch tensor with shape (B, C, T) or (B, T) or (T)
366
+ - np.ndarray: NumPy array with shape (B, C, T) or (B, T) or (T)
367
+ - List of tensors or arrays
368
+ output_path: Path where to save the audio. If saving multiple files,
369
+ this is treated as a directory and individual files will be saved inside.
370
+ sampling_rate: Sampling rate for the saved audio. Defaults to the processor's rate.
371
+ normalize: Whether to normalize audio before saving.
372
+ batch_prefix: Prefix for batch files when saving multiple audios.
373
+
374
+ Returns:
375
+ List[str]: Paths to the saved audio files.
376
+ """
377
+ if sampling_rate is None:
378
+ sampling_rate = self.sampling_rate
379
+
380
+ try:
381
+ import soundfile as sf
382
+ except ImportError:
383
+ raise ImportError(
384
+ "soundfile is required to save audio files. "
385
+ "Install it with: pip install soundfile"
386
+ )
387
+
388
+ # Ensure audio is in the right format
389
+ if isinstance(audio, torch.Tensor):
390
+ # Convert PyTorch tensor to numpy
391
+ audio_np = audio.float().detach().cpu().numpy()
392
+ elif isinstance(audio, np.ndarray):
393
+ audio_np = audio
394
+ elif isinstance(audio, list):
395
+ # Handle list of tensors or arrays
396
+ if all(isinstance(a, torch.Tensor) for a in audio):
397
+ audio_np = [a.float().detach().cpu().numpy() for a in audio]
398
+ else:
399
+ audio_np = audio
400
+ else:
401
+ raise ValueError(f"Unsupported audio type: {type(audio)}")
402
+
403
+ saved_paths = []
404
+
405
+ # Handle based on shape or type
406
+ if isinstance(audio_np, list):
407
+ # Multiple separate audios to save
408
+ output_dir = output_path
409
+
410
+ # Ensure output directory exists
411
+ os.makedirs(output_dir, exist_ok=True)
412
+
413
+ # Save each audio
414
+ for i, audio_item in enumerate(audio_np):
415
+ audio_item = self._prepare_audio_for_save(audio_item, normalize)
416
+ file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav")
417
+ sf.write(file_path, audio_item, sampling_rate)
418
+ saved_paths.append(file_path)
419
+
420
+ else:
421
+ # Handle different dimensions
422
+ if len(audio_np.shape) >= 3: # (B, C, T) or similar
423
+ # Get batch size
424
+ batch_size = audio_np.shape[0]
425
+
426
+ if batch_size > 1:
427
+ # Multiple audios in a batch
428
+ output_dir = output_path
429
+
430
+ # Ensure output directory exists
431
+ os.makedirs(output_dir, exist_ok=True)
432
+
433
+ # Save each audio in the batch
434
+ for i in range(batch_size):
435
+ # Extract single audio and remove channel dim if present
436
+ single_audio = audio_np[i]
437
+ if len(single_audio.shape) > 1:
438
+ if single_audio.shape[0] == 1: # (1, T)
439
+ single_audio = single_audio.squeeze(0)
440
+
441
+ single_audio = self._prepare_audio_for_save(single_audio, normalize)
442
+ file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav")
443
+ sf.write(file_path, single_audio, sampling_rate)
444
+ saved_paths.append(file_path)
445
+ else:
446
+ # Single audio with batch and channel dims
447
+ audio_item = audio_np.squeeze() # Remove batch and channel dimensions
448
+ audio_item = self._prepare_audio_for_save(audio_item, normalize)
449
+ sf.write(output_path, audio_item, sampling_rate)
450
+ saved_paths.append(output_path)
451
+ else:
452
+ # Single audio without batch dimension
453
+ audio_item = self._prepare_audio_for_save(audio_np, normalize)
454
+ sf.write(output_path, audio_item, sampling_rate)
455
+ saved_paths.append(output_path)
456
+
457
+ return saved_paths
458
+
459
+ def _prepare_audio_for_save(self, audio: np.ndarray, normalize: bool) -> np.ndarray:
460
+ """
461
+ Prepare audio for saving by ensuring it's the right shape and optionally normalizing.
462
+
463
+ Args:
464
+ audio: Audio data as numpy array
465
+ normalize: Whether to normalize audio
466
+
467
+ Returns:
468
+ np.ndarray: Processed audio ready for saving
469
+ """
470
+ # Ensure right dimensionality
471
+ if len(audio.shape) > 1 and audio.shape[0] == 1: # (1, T)
472
+ audio = audio.squeeze(0)
473
+
474
+ # Normalize if requested
475
+ if normalize:
476
+ max_val = np.abs(audio).max()
477
+ if max_val > 0:
478
+ audio = audio / max_val
479
+
480
+ return audio
481
+
482
+
483
# Public names exported by ``from <module> import *``.
__all__ = ["VibeVoiceTokenizerProcessor", "AudioNormalizer"]
VibeVoice-finetuning/src/vibevoice/schedule/__init__.py ADDED
File without changes
VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (176 Bytes). View file