Upload finetune_vibevoice_lora105.py
Browse files
VibeVoice-finetuning/src/finetune_vibevoice_lora105.py
ADDED
|
@@ -0,0 +1,1044 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_vibevoice_lora.py
|
| 2 |
+
import os
|
| 3 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
|
| 4 |
+
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
from dataclasses import dataclass, field
|
| 9 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from datasets import load_dataset, DatasetDict, VerificationMode
|
| 15 |
+
|
| 16 |
+
from transformers import (
|
| 17 |
+
HfArgumentParser,
|
| 18 |
+
Trainer,
|
| 19 |
+
set_seed,
|
| 20 |
+
TrainerCallback,
|
| 21 |
+
)
|
| 22 |
+
from transformers import TrainingArguments as HfTrainingArguments
|
| 23 |
+
|
| 24 |
+
from peft import LoraConfig, get_peft_model, TaskType
|
| 25 |
+
|
| 26 |
+
from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
|
| 27 |
+
from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
|
| 28 |
+
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
|
| 29 |
+
|
| 30 |
+
from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
|
| 31 |
+
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
# ================== SAMPLE CALLBACK UTILS ==================
|
| 35 |
+
|
| 36 |
+
import copy
|
| 37 |
+
import torch
|
| 38 |
+
from transformers import TrainerCallback
|
| 39 |
+
|
| 40 |
+
class EmaCallback(TrainerCallback):
    """Maintains an exponential moving average (EMA) of one sub-module's weights.

    The tracked module (default: the diffusion prediction head) is updated every
    optimizer step; the EMA copy is swapped in around checkpointing so saved
    checkpoints contain the smoothed weights.
    """

    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None   # EMA copy of the tracked module's state_dict
        self._orig = None    # live (non-EMA) weights stashed while EMA is swapped in

    def _get_module(self, model):
        # Resolve a dotted path like "model.prediction_head".
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_begin(self, args, state, control, model=None, **kwargs):
        # BUGFIX: transformers' Trainer has no `on_evaluate_end`/`on_save_end`
        # events, so the original swap-back was never invoked and EMA weights
        # silently leaked into training after every eval/save. Restoring the
        # live weights before each training step makes the swap-back reliable.
        self._swap_back(model)

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        # BUGFIX: guard against double swap-in (e.g. evaluate immediately
        # followed by save): previously the second call deep-copied the EMA
        # weights into `_orig`, permanently losing the live weights.
        if self._orig is not None or self.shadow is None:
            return
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # NOTE(review): in HF Trainer this event fires *after* metrics are
        # computed, so evaluation itself ran on the live weights. Kept for
        # parity with the original; restored at the next on_step_begin.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # Not a real transformers hook (never invoked by Trainer); kept only
        # for backward compatibility with any manual callers.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # Swap to EMA around checkpointing; restored at the next on_step_begin.
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # Not a real transformers hook; kept for backward compatibility.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # Final checkpoint: persist the EMA weights.
        self._swap_in_ema(model)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@dataclass
class ModelArguments:
    """CLI arguments selecting the model/processor and which parts to train."""
    # Path to VibeVoice base model with config.json.
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    # Processor directory; falls back to model_name_or_path when unset.
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    cache_dir: Optional[str] = field(default=None)
    # Both tokenizers are frozen by default; training them is opt-out.
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    # Standard LoRA hyperparameters applied to the LLM blocks (and, optionally,
    # the diffusion head via lora_wrap_diffusion_head).
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    # NOTE: indices refer to named_parameters() order of the prediction head,
    # not to architectural layer numbers (see freezing loop in main()).
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )
|
| 127 |
+
|
| 128 |
+
@dataclass
class DataArguments:
    """CLI arguments describing the training/eval datasets and column mapping."""
    # Either a HF hub dataset name, or 'json' combined with --train_jsonl.
    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    # Column names within the dataset rows.
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    # Fraction of data carved out for eval when no explicit eval split exists
    # (0.0 disables the split; see the preprocessed-data branch in main()).
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    # Classifier-free-guidance-style dropout of the conditioning voice prompt.
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
    )
|
| 146 |
+
|
| 147 |
+
@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    """HF TrainingArguments extended with VibeVoice loss weights and debug knobs."""
    # NOTE(review): presumably a per-sample multiplier for diffusion (DDPM)
    # loss sampling — confirm against the Trainer subclass that consumes it.
    ddpm_batch_mul: int = field(default=1)
    # Relative weights of the two loss terms (token CE vs diffusion loss).
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    # Verbose CE debugging: top-k dump cadence and volume.
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    # When False, main() forces max_grad_norm=0.0 to disable clipping.
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )
|
| 164 |
+
|
| 165 |
+
def build_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA configuration applied to the language-model blocks."""
    raw_names = args.lora_target_modules.split(",")
    modules = [name.strip() for name in raw_names if name.strip()]
    return LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        target_modules=modules,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
|
| 175 |
+
|
| 176 |
+
def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    """Build the LoRA configuration used when wrapping the diffusion head."""
    head_modules = ["noisy_images_proj", "cond_proj", "gate_proj", "up_proj", "down_proj", "linear"]
    return LoraConfig(
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=head_modules,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
    )
|
| 186 |
+
|
| 187 |
+
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    """Produce next-token CE targets: shift labels left by one and replace
    padded positions and acoustic-token positions with ``pad_id`` so they are
    ignored by the cross-entropy loss.
    """
    shifted = labels[:, 1:].contiguous()
    # Positions valid under the attention mask; everything valid when no mask.
    if attention_mask is not None and attention_mask.numel() > 0:
        valid = attention_mask[:, 1:].contiguous().eq(1)
    else:
        valid = torch.ones_like(shifted, dtype=torch.bool)
    # Acoustic (speech) tokens are predicted by the diffusion head, not CE.
    acoustic_next = acoustic_input_mask[:, 1:].contiguous()
    keep = valid & ~acoustic_next
    masked = shifted.clone()
    masked[~keep] = pad_id
    return masked
|
| 195 |
+
|
| 196 |
+
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
|
| 197 |
+
try:
|
| 198 |
+
acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
|
| 199 |
+
if acoustic is None or not hasattr(acoustic, "encode"):
|
| 200 |
+
logger_.warning("No acoustic_tokenizer.encode() found to patch.")
|
| 201 |
+
return
|
| 202 |
+
base_encode = acoustic.encode
|
| 203 |
+
def encode_wrapped(*args, **kwargs):
|
| 204 |
+
out = base_encode(*args, **kwargs)
|
| 205 |
+
try:
|
| 206 |
+
_ = out[0][0]
|
| 207 |
+
return out
|
| 208 |
+
except Exception:
|
| 209 |
+
pass
|
| 210 |
+
if isinstance(out, dict):
|
| 211 |
+
for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 212 |
+
if k in out:
|
| 213 |
+
return [[out[k]]]
|
| 214 |
+
if len(out) > 0:
|
| 215 |
+
return [[next(iter(out.values()))]]
|
| 216 |
+
for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
|
| 217 |
+
if hasattr(out, attr):
|
| 218 |
+
return [[getattr(out, attr)]]
|
| 219 |
+
try:
|
| 220 |
+
if isinstance(out, torch.Tensor):
|
| 221 |
+
return [[out]]
|
| 222 |
+
except Exception:
|
| 223 |
+
pass
|
| 224 |
+
return [[out]]
|
| 225 |
+
acoustic.encode = encode_wrapped
|
| 226 |
+
logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
|
| 227 |
+
except Exception as e:
|
| 228 |
+
logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
|
| 229 |
+
|
| 230 |
+
def main() -> None:
|
| 231 |
+
parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
|
| 232 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
| 233 |
+
|
| 234 |
+
logging.basicConfig(
|
| 235 |
+
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
| 236 |
+
datefmt="%m/%d/%Y %H:%M:%S",
|
| 237 |
+
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
|
| 238 |
+
)
|
| 239 |
+
logger.info("Training/evaluation parameters %s", training_args)
|
| 240 |
+
set_seed(training_args.seed)
|
| 241 |
+
|
| 242 |
+
# Configure gradient clipping
|
| 243 |
+
if not getattr(training_args, "gradient_clipping", False):
|
| 244 |
+
if hasattr(training_args, "max_grad_norm"):
|
| 245 |
+
training_args.max_grad_norm = 0.0
|
| 246 |
+
logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
|
| 247 |
+
else:
|
| 248 |
+
if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
|
| 249 |
+
training_args.max_grad_norm = 1.0
|
| 250 |
+
logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
|
| 251 |
+
|
| 252 |
+
# Load processor
|
| 253 |
+
processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
|
| 254 |
+
if processor_path is None:
|
| 255 |
+
raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
|
| 256 |
+
processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
|
| 257 |
+
|
| 258 |
+
# Required special tokens
|
| 259 |
+
tok = processor.tokenizer
|
| 260 |
+
for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
|
| 261 |
+
if not hasattr(tok, required) or getattr(tok, required) is None:
|
| 262 |
+
raise RuntimeError(f"Tokenizer missing required special id: {required}")
|
| 263 |
+
|
| 264 |
+
# Load model
|
| 265 |
+
if model_args.model_name_or_path is None:
|
| 266 |
+
raise ValueError("--model_name_or_path is required to load VibeVoice base model")
|
| 267 |
+
dtype = torch.float32
|
| 268 |
+
if training_args.bf16:
|
| 269 |
+
dtype = torch.bfloat16
|
| 270 |
+
elif getattr(training_args, "fp16", False):
|
| 271 |
+
dtype = torch.float16
|
| 272 |
+
model = VibeVoiceForConditionalGeneration.from_pretrained(
|
| 273 |
+
model_args.model_name_or_path,
|
| 274 |
+
torch_dtype=dtype, device_map={"": 0},
|
| 275 |
+
)
|
| 276 |
+
_patch_acoustic_encode_for_legacy_indexing(model, logger)
|
| 277 |
+
processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
|
| 278 |
+
|
| 279 |
+
# Diagnostics: LM head tie
|
| 280 |
+
try:
|
| 281 |
+
in_emb_mod = model.get_input_embeddings()
|
| 282 |
+
out_emb_mod = model.get_output_embeddings()
|
| 283 |
+
in_w = getattr(in_emb_mod, "weight", None)
|
| 284 |
+
out_w = getattr(out_emb_mod, "weight", None)
|
| 285 |
+
shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
|
| 286 |
+
values_equal = False
|
| 287 |
+
if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
|
| 288 |
+
try:
|
| 289 |
+
values_equal = bool(torch.allclose(in_w, out_w))
|
| 290 |
+
except Exception:
|
| 291 |
+
values_equal = False
|
| 292 |
+
try:
|
| 293 |
+
tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
|
| 294 |
+
except Exception:
|
| 295 |
+
tie_cfg = getattr(model.config, "tie_word_embeddings", None)
|
| 296 |
+
logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
|
| 297 |
+
if out_w is not None:
|
| 298 |
+
logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
|
| 299 |
+
except Exception as e:
|
| 300 |
+
logger.warning(f"LM head tie diagnostics failed: {e}")
|
| 301 |
+
|
| 302 |
+
# Hard-tie LM head
|
| 303 |
+
try:
|
| 304 |
+
emb_module = model.get_input_embeddings()
|
| 305 |
+
head_module = model.get_output_embeddings()
|
| 306 |
+
if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
|
| 307 |
+
if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
|
| 308 |
+
with torch.no_grad():
|
| 309 |
+
head_module.weight = emb_module.weight
|
| 310 |
+
logger.info("Force-tied LM head weight to input embeddings (pointer share).")
|
| 311 |
+
except Exception as e:
|
| 312 |
+
logger.warning(f"Force-tie of LM head failed: {e}")
|
| 313 |
+
|
| 314 |
+
# Validate special IDs (info logs only)
|
| 315 |
+
try:
|
| 316 |
+
special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
|
| 317 |
+
try:
|
| 318 |
+
vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
|
| 319 |
+
except Exception:
|
| 320 |
+
vocab_size = 0
|
| 321 |
+
in_emb_mod = model.get_input_embeddings()
|
| 322 |
+
out_emb_mod = model.get_output_embeddings()
|
| 323 |
+
in_w = getattr(in_emb_mod, "weight", None)
|
| 324 |
+
out_w = getattr(out_emb_mod, "weight", None)
|
| 325 |
+
for name in special_names:
|
| 326 |
+
val = getattr(tok, name, None)
|
| 327 |
+
exists = (val is not None)
|
| 328 |
+
in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
|
| 329 |
+
equal_row = None
|
| 330 |
+
if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
|
| 331 |
+
try:
|
| 332 |
+
equal_row = bool(torch.allclose(in_w[val], out_w[val]))
|
| 333 |
+
except Exception:
|
| 334 |
+
equal_row = False
|
| 335 |
+
decoded_str = None
|
| 336 |
+
if exists and isinstance(val, int):
|
| 337 |
+
try:
|
| 338 |
+
decoded_str = tok.decode([val])
|
| 339 |
+
except Exception:
|
| 340 |
+
try:
|
| 341 |
+
decoded_str = tok.convert_ids_to_tokens(val)
|
| 342 |
+
except Exception:
|
| 343 |
+
decoded_str = "<decode_failed>"
|
| 344 |
+
logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
|
| 345 |
+
except Exception as e:
|
| 346 |
+
logger.warning(f"Special token ID/row validation failed: {e}")
|
| 347 |
+
|
| 348 |
+
# Quick tokenizer diagnostics (optional)
|
| 349 |
+
try:
|
| 350 |
+
logger.info("=== TOKENIZER DIAGNOSTICS ===")
|
| 351 |
+
logger.info(f"Tokenizer class: {type(tok).__name__}")
|
| 352 |
+
logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
|
| 353 |
+
# tiny CE smoke test
|
| 354 |
+
with torch.no_grad():
|
| 355 |
+
simple_text = "The cat sat on the mat."
|
| 356 |
+
simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
|
| 357 |
+
simple_mask = torch.ones_like(simple_ids)
|
| 358 |
+
x = model.get_input_embeddings()(simple_ids)
|
| 359 |
+
outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
|
| 360 |
+
logits = model.lm_head(outputs.last_hidden_state)
|
| 361 |
+
shift_logits = logits[:, :-1, :].contiguous()
|
| 362 |
+
shift_labels = simple_ids[:, 1:].contiguous()
|
| 363 |
+
ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
|
| 364 |
+
logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
|
| 365 |
+
except Exception as e:
|
| 366 |
+
logger.warning(f"Tokenizer diagnostics failed: {e}")
|
| 367 |
+
|
| 368 |
+
# Disable cache during training
|
| 369 |
+
if hasattr(model.config, "use_cache") and training_args.do_train:
|
| 370 |
+
model.config.use_cache = False
|
| 371 |
+
|
| 372 |
+
# Freeze tokenizers
|
| 373 |
+
if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
|
| 374 |
+
for p in model.model.acoustic_tokenizer.parameters():
|
| 375 |
+
p.requires_grad = False
|
| 376 |
+
if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
|
| 377 |
+
for p in model.model.semantic_tokenizer.parameters():
|
| 378 |
+
p.requires_grad = False
|
| 379 |
+
|
| 380 |
+
# LoRA wrap LLM (optional)
|
| 381 |
+
lora_cfg = build_lora_config(model_args)
|
| 382 |
+
tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
|
| 383 |
+
skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
|
| 384 |
+
if not skip_lm_lora:
|
| 385 |
+
model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
|
| 386 |
+
else:
|
| 387 |
+
logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
|
| 388 |
+
|
| 389 |
+
try:
|
| 390 |
+
model.tie_weights()
|
| 391 |
+
except Exception:
|
| 392 |
+
pass
|
| 393 |
+
|
| 394 |
+
# Freeze all then enable trainable subsets
|
| 395 |
+
for _, p in model.named_parameters():
|
| 396 |
+
p.requires_grad = False
|
| 397 |
+
|
| 398 |
+
try:
|
| 399 |
+
for n, p in model.model.language_model.named_parameters():
|
| 400 |
+
if "lora_A" in n or "lora_B" in n:
|
| 401 |
+
p.requires_grad = True
|
| 402 |
+
except Exception:
|
| 403 |
+
logger.warning("Could not re-enable LoRA params on language_model.")
|
| 404 |
+
|
| 405 |
+
# Diffusion head LoRA wrapping (optional)
|
| 406 |
+
if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
|
| 407 |
+
class _HeadForwardShim(nn.Module):
|
| 408 |
+
def __init__(self, base: nn.Module): super().__init__(); self.base = base
|
| 409 |
+
def forward(self, *args, **kwargs):
|
| 410 |
+
if len(args) >= 3:
|
| 411 |
+
noisy_images, timesteps, condition = args[:3]
|
| 412 |
+
else:
|
| 413 |
+
noisy_images = kwargs.get("noisy_images")
|
| 414 |
+
timesteps = kwargs.get("timesteps")
|
| 415 |
+
condition = kwargs.get("condition")
|
| 416 |
+
return self.base(noisy_images, timesteps, condition)
|
| 417 |
+
try:
|
| 418 |
+
shim = _HeadForwardShim(model.model.prediction_head)
|
| 419 |
+
model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
|
| 420 |
+
for n, p in model.model.prediction_head.named_parameters():
|
| 421 |
+
if "lora_A" in n or "lora_B" in n:
|
| 422 |
+
p.requires_grad = True
|
| 423 |
+
except Exception as e:
|
| 424 |
+
logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
|
| 425 |
+
|
| 426 |
+
# Train full diffusion head (optional)
|
| 427 |
+
if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
|
| 428 |
+
for p in model.model.prediction_head.parameters():
|
| 429 |
+
p.requires_grad = True
|
| 430 |
+
|
| 431 |
+
# Freeze diffusion head layers (optional)
|
| 432 |
+
if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
|
| 433 |
+
head_params = list(model.model.prediction_head.named_parameters())
|
| 434 |
+
try:
|
| 435 |
+
indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
|
| 436 |
+
frozen_count = 0
|
| 437 |
+
for i, (name, param) in enumerate(head_params):
|
| 438 |
+
if i in indices_to_freeze:
|
| 439 |
+
param.requires_grad = False
|
| 440 |
+
frozen_count += 1
|
| 441 |
+
logger.info(f"Froze layer [{i}]: {name}")
|
| 442 |
+
logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
|
| 443 |
+
except Exception as e:
|
| 444 |
+
logger.error(f"Could not parse --layers_to_freeze: {e}")
|
| 445 |
+
raise
|
| 446 |
+
|
| 447 |
+
# Connectors
|
| 448 |
+
if getattr(model_args, "train_connectors", False):
|
| 449 |
+
if hasattr(model.model, "acoustic_connector"):
|
| 450 |
+
for p in model.model.acoustic_connector.parameters():
|
| 451 |
+
p.requires_grad = True
|
| 452 |
+
if hasattr(model.model, "semantic_connector"):
|
| 453 |
+
for p in model.model.semantic_connector.parameters():
|
| 454 |
+
p.requires_grad = True
|
| 455 |
+
else:
|
| 456 |
+
if hasattr(model.model, "acoustic_connector"):
|
| 457 |
+
for p in model.model.acoustic_connector.parameters():
|
| 458 |
+
p.requires_grad = False
|
| 459 |
+
if hasattr(model.model, "semantic_connector"):
|
| 460 |
+
for p in model.model.semantic_connector.parameters():
|
| 461 |
+
p.requires_grad = False
|
| 462 |
+
|
| 463 |
+
# Freeze embedding + head
|
| 464 |
+
try:
|
| 465 |
+
emb = model.get_input_embeddings()
|
| 466 |
+
if hasattr(emb, "weight"):
|
| 467 |
+
emb.weight.requires_grad_(False)
|
| 468 |
+
head = model.get_output_embeddings()
|
| 469 |
+
if head is not None and hasattr(head, "weight"):
|
| 470 |
+
head.weight.requires_grad_(False)
|
| 471 |
+
except Exception:
|
| 472 |
+
pass
|
| 473 |
+
|
| 474 |
+
# Diagnostics
|
| 475 |
+
def _sum_params(named_iter):
|
| 476 |
+
return sum(p.numel() for _, p in named_iter if p.requires_grad)
|
| 477 |
+
try:
|
| 478 |
+
lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
|
| 479 |
+
pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
|
| 480 |
+
ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
|
| 481 |
+
se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
|
| 482 |
+
total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
|
| 483 |
+
logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
|
| 484 |
+
logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
|
| 485 |
+
except Exception:
|
| 486 |
+
pass
|
| 487 |
+
|
| 488 |
+
# Preprocessed data classes
|
| 489 |
+
class PreprocessedBatchDataset:
    """Dataset over batches preprocessed offline and stored via torch.save."""

    def __init__(self, preprocessed_file: str):
        # Load onto CPU; the Trainer moves tensors to the device later.
        self.data = torch.load(preprocessed_file, map_location='cpu')
        logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Return a shallow copy of the stored batch dict. (The original's
        # tensor/non-tensor branches were identical, so this collapses them.)
        return dict(self.data[idx])
|
| 506 |
+
|
| 507 |
+
class PreprocessedBatchSubset:
    """Index-list view over a PreprocessedBatchDataset (train/eval split)."""

    def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
        self.dataset = dataset
        self.indices = indices

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        # Translate the subset position into the underlying dataset index.
        return self.dataset[self.indices[idx]]
|
| 518 |
+
|
| 519 |
+
class PreprocessedBatchCollator:
    """Collate already-preprocessed batches by concatenating tensor values
    along dim 0; non-tensor values are passed through from the first
    example that provides them (None if no example does)."""

    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        result = {}
        # Keys come from the first example. Use .get() so an example that is
        # missing a key is simply skipped instead of raising KeyError.
        for key in batch[0].keys():
            values = [b.get(key) for b in batch]
            tensors = [v for v in values if v is not None]
            if tensors and isinstance(tensors[0], torch.Tensor):
                result[key] = torch.cat(tensors, dim=0)
            else:
                result[key] = tensors[0] if tensors else None
        return result
|
| 531 |
+
|
| 532 |
+
# Datasets
# Prefer a previously materialized tensor cache over re-tokenizing raw data.
preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")

if os.path.exists(preprocessed_file):
    logger.info(f"Loading preprocessed data from {preprocessed_file}")
    preprocessed_data = PreprocessedBatchDataset(preprocessed_file)

    train_dataset = preprocessed_data
    eval_dataset = None

    # Optionally carve out an eval split as a deterministic (seeded) shuffle
    # of batch indices; at least one batch always goes to eval.
    if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
        num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
        num_train = len(preprocessed_data) - num_eval
        indices = list(range(len(preprocessed_data)))
        import random
        random.Random(training_args.seed).shuffle(indices)
        train_indices = indices[:num_train]
        eval_indices = indices[num_train:]
        train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
        eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
else:
    logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
    verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
    if data_args.train_jsonl is not None:
        # Local JSONL files take precedence over a hub dataset name.
        data_files: Dict[str, str] = {"train": data_args.train_jsonl}
        if data_args.validation_jsonl is not None:
            data_files["validation"] = data_args.validation_jsonl
        raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
    else:
        if data_args.dataset_name is None:
            raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
        raw = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            verification_mode=verification_mode,
            cache_dir=model_args.cache_dir,
        )
    train_ds = raw[data_args.train_split_name]
    eval_ds = None
    if training_args.do_eval:
        # Use a named eval split when the dataset provides one; otherwise
        # split a fraction off the train split (seeded, reproducible).
        if data_args.eval_split_name and data_args.eval_split_name in raw:
            eval_ds = raw[data_args.eval_split_name]
        elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
            split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
            train_ds, eval_ds = split["train"], split["test"]

    # Wrap raw rows in the project dataset that extracts text/audio/voice-prompt columns.
    train_dataset = VibeVoiceDataset(
        train_ds,
        text_column=data_args.text_column_name,
        audio_column=data_args.audio_column_name,
        voice_prompts_column=data_args.voice_prompts_column_name,
    )
    eval_dataset = None
    if eval_ds is not None:
        eval_dataset = VibeVoiceDataset(
            eval_ds,
            text_column=data_args.text_column_name,
            audio_column=data_args.audio_column_name,
            voice_prompts_column=data_args.voice_prompts_column_name,
        )
|
| 593 |
+
|
| 594 |
+
# Ratios/dims from processor+model
# presumably the audio-sample -> acoustic-latent compression factor;
# defaults to 3200 when the processor does not expose it — TODO confirm.
speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
semantic_dim = getattr(model.config, "semantic_vae_dim", None)
if semantic_dim is None:
    # Fall back to the semantic tokenizer sub-config; 128 if absent/broken.
    try:
        semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
    except Exception:
        semantic_dim = 128

# Only compute semantic features when the processor actually carries a semantic tokenizer.
compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None

if os.path.exists(preprocessed_file):
    # Preprocessed batches are already tokenized/padded — just concatenate.
    data_collator = PreprocessedBatchCollator()
else:
    data_collator = VibeVoiceCollator(
        processor=processor,
        max_length=data_args.max_length,
        speech_compress_ratio=speech_compress_ratio,
        semantic_vae_dim=semantic_dim,
        compute_semantics=compute_semantics_flag,
        debug_checks=False,
        voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
    )
|
| 617 |
+
|
| 618 |
+
class LoRADebugCallback(TrainerCallback):
    """Trainer callback that tracks LoRA A/B parameter norms to verify the
    adapters are actually training.

    On train begin it snapshots the L2 norm of every parameter whose name
    contains ``lora_A``/``lora_B``; on (periodic) step end it reports how many
    changed since the previous snapshot and how many ``lora_B`` matrices are
    still all-zero. All failures are logged, never raised, so debugging can
    never break training.
    """

    def __init__(self, log_every_n_steps: int = 50):
        # Clamp to >= 1 so the modulo check below is always valid.
        self.log_every_n_steps = max(1, int(log_every_n_steps))
        # param name -> last observed L2 norm of its data
        self.prev_param_norms: Dict[str, float] = {}
        # names of all LoRA params discovered at train begin
        self.lora_param_names: List[str] = []

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        """Discover LoRA params, snapshot their norms, and log sanity checks."""
        try:
            if model is None:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
            for n in self.lora_param_names:
                p = named[n]
                self.prev_param_norms[n] = float(p.data.norm().item())
            total = len(self.lora_param_names)
            req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
            num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            # NOTE(review): lora_B is typically zero-initialized, so all-zero
            # norms at the start are expected — confirm against the PEFT version.
            zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
            logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
            if total == 0:
                logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
            if req_grad != total:
                logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

    def on_step_end(self, args, state, control, model=None, **kwargs):
        """Every N steps (and at step 1), count how many LoRA norms changed."""
        try:
            if model is None or len(self.lora_param_names) == 0:
                return
            step = int(getattr(state, "global_step", 0) or 0)
            # Log on multiples of log_every_n_steps, plus always on step 1.
            if step % self.log_every_n_steps != 0 and step != 1:
                return
            named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
            changed_A = 0
            changed_B = 0
            zero_B = 0
            eps = 1e-12
            for n in self.lora_param_names:
                p = named.get(n, None)
                if p is None:
                    continue
                prev = self.prev_param_norms.get(n, 0.0)
                curr = float(p.data.norm().item())
                if "lora_A" in n and abs(curr - prev) > eps:
                    changed_A += 1
                if "lora_B" in n:
                    if abs(curr - prev) > eps:
                        changed_B += 1
                    if curr == 0.0:
                        zero_B += 1
                # Roll the snapshot forward so "changed" is per logging interval.
                self.prev_param_norms[n] = curr
            total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
            total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
            logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
        except Exception as e:
            logger.warning(f"LoRA debug (on_step_end) failed: {e}")
|
| 677 |
+
|
| 678 |
+
class VibeVoiceTrainer(Trainer):
    """Trainer specialization for VibeVoice: weighted CE + diffusion loss,
    optional CE diagnostics, and a save override that dumps LoRA adapters,
    connectors, and a full diffusion-head state_dict for inference.

    NOTE(review): loss weights and ddpm_batch_mul are read from the enclosing
    scope's ``training_args`` (the class is defined inside main), not from
    ``self.args`` — confirm both stay in sync.
    """

    def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
        """Return ce_loss_weight * CE + diffusion_loss_weight * diffusion loss.

        ``num_items_in_batch`` is accepted for newer Trainer API compatibility
        but is unused. Labels are the (masked) input_ids themselves.
        """
        labels = inputs.get("input_ids")
        attention_mask = inputs.get("attention_mask")
        acoustic_input_mask = inputs.get("acoustic_input_mask")

        # Ensure semantic tensors exist and have correct dtype/device
        sem = inputs.get("speech_semantic_tensors", None)
        # Match the semantic connector's dtype; fall back to the embedding dtype.
        try:
            target_dtype = next(model.model.semantic_connector.parameters()).dtype
        except Exception:
            target_dtype = model.get_input_embeddings().weight.dtype

        if sem is None:
            # Fabricate an all-zero semantic tensor shaped like the speech
            # latents so the forward pass never sees a missing input.
            sm = inputs.get("speech_masks")
            if sm is not None:
                zeros = torch.zeros(
                    sm.size(0), sm.size(1),
                    getattr(model.config, "semantic_vae_dim", 128),
                    dtype=target_dtype,
                    device=sm.device,
                )
                inputs["speech_semantic_tensors"] = zeros
        else:
            if isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)

        outputs = model(
            input_ids=inputs.get("input_ids"),
            attention_mask=attention_mask,
            speech_tensors=inputs.get("speech_tensors"),
            speech_masks=inputs.get("speech_masks"),
            speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
            acoustic_input_mask=acoustic_input_mask,
            acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
            speeches_loss_input=inputs.get("speeches_loss_input"),
            ddpm_batch_mul=training_args.ddpm_batch_mul,
        )

        # Invariants: token/latent selection equality across views (warn, don't assert)
        try:
            al_mask = inputs.get("acoustic_loss_mask")
            sp_masks = inputs.get("speech_masks")
            sp_loss_sel = inputs.get("speeches_loss_input")
            num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
            num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
            num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
            num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
            self.log({
                "debug/num_tok_total": float(num_tok_total),
                "debug/num_tok_loss": float(num_tok_loss),
                "debug/num_lat_total": float(num_lat_total),
                "debug/num_lat_loss": float(num_lat_loss),
            })
            if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                if num_tok_loss != num_lat_loss:
                    logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
        except Exception:
            pass

        # CE Loss
        logits = outputs.logits
        # NOTE(review): labels are not shifted here — this assumes mask_for_ce
        # returns labels already aligned with logits[:, :-1]; confirm, otherwise
        # the flattened shapes below would not match.
        ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
        shift_logits = logits[:, :-1, :].contiguous()
        loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
        ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

        # Optional CE diagnostics
        try:
            self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
        except Exception as e:
            logger.warning(f"Failed invoking CE debug: {e}")

        # Diffusion loss
        # Treat a missing diffusion loss as 0 so the weighted sum stays defined.
        diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
        total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

        # Logs
        try:
            prefix = "train" if model.training else "eval"
            self.log({
                f"{prefix}/ce_loss": ce_loss.detach().item(),
                f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
            })
            # Log the optimizer's actual (scheduler-applied) LR, not the configured one.
            if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                lr_val = self.optimizer.param_groups[0].get("lr", None)
                if lr_val is not None:
                    self.log({"train/learning_rate_real": float(lr_val)})
        except Exception:
            pass

        return (total, outputs) if return_outputs else total

    def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
        """Log per-token and per-example CE stats when debug_ce_details is set.

        Only runs at step <= 1 and every debug_ce_every_n_steps thereafter;
        any failure is logged and swallowed.
        """
        try:
            if not getattr(training_args, "debug_ce_details", False):
                return
            step = int(getattr(self.state, "global_step", 0) or 0)
            every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
            if not (step <= 1 or (step % every_n == 0)):
                return

            with torch.no_grad():
                vocab = shift_logits.size(-1)
                # Unreduced CE so we can aggregate per token and per example.
                per_token_loss = F.cross_entropy(
                    shift_logits.view(-1, vocab),
                    ce_labels.view(-1),
                    reduction="none",
                    ignore_index=-100,
                ).view_as(ce_labels)

                valid_mask = ce_labels.ne(-100)
                num_valid = int(valid_mask.sum().item())
                avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")

                per_ex_avgs = []
                max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                B = ce_labels.size(0)
                for b in range(min(B, max_examples)):
                    vb = valid_mask[b]
                    if int(vb.sum().item()) > 0:
                        per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                    else:
                        per_ex_avgs.append(float("nan"))
                # x == x filters NaN (NaN != NaN) before rounding for display.
                logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x,4) if x==x else None for x in per_ex_avgs]}")
        except Exception as e:
            logger.warning(f"CE detailed debug failed: {e}")

    # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------

    def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
        """Override Trainer._save to persist only the tuned assets under
        <dir>/lora: PEFT adapters, a full diffusion-head state_dict, and the
        connector state_dicts.

        NOTE(review): this replaces the default model serialization — no
        full-model weights file is written, and ``state_dict`` is ignored;
        resuming relies on the custom loader later in this script.
        """
        try:
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)

            # --- LLM PEFT adapters (if LoRA-wrapped) ---
            language_model = getattr(self.model.model, "language_model", None)
            if hasattr(language_model, "save_pretrained"):
                language_model.save_pretrained(lora_out)

            # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
            pred_head = getattr(self.model.model, "prediction_head", None)
            if hasattr(pred_head, "save_pretrained"):
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                pred_head.save_pretrained(ph_dir)

            # --- ALWAYS save FULL diffusion head state_dict for fallback ---
            # Written to both locations so inference can find it either way.
            if pred_head is not None and hasattr(pred_head, "state_dict"):
                sd = pred_head.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

            # --- Connectors (plain state_dicts) ---
            ac = getattr(self.model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

            se = getattr(self.model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

        except Exception as e:
            logger.warning(f"Failed to save LoRA assets: {e}")
|
| 850 |
+
|
| 851 |
+
|
| 852 |
+
# ------------- Build the Trainer -------------

# Resolve which adapters to apply in samples

# EmaCallback keeps an exponential moving average of the diffusion head
# (decay 0.999) — see its definition elsewhere in this file.
ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")

# --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
# This prevents 'ValueError: Attempting to unscale FP16 gradients'
if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
    # Use the module logger (not print) so this lands in the training log
    # alongside every other message this script emits.
    logger.info('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
    for param in model.parameters():
        if param.requires_grad:
            param.data = param.data.to(torch.float32)
# ---------------------------------------------------

trainer = VibeVoiceTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    data_collator=data_collator,
    callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
)
|
| 875 |
+
|
| 876 |
+
# Optional debug pre-training save
# Dumps every tunable component BEFORE any training step so the on-disk
# layout can be validated (and later diffed against post-training exports).
# Each sub-save is individually best-effort: one failure never blocks the rest.
if getattr(training_args, "debug_save", False):
    try:
        debug_dir = os.path.join(training_args.output_dir, "debug_initial")
        lora_out = os.path.join(debug_dir, "lora")
        os.makedirs(lora_out, exist_ok=True)
        logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
        # language model adapters / base
        try:
            if hasattr(model.model.language_model, "save_pretrained"):
                model.model.language_model.save_pretrained(lora_out)
        except Exception as e_lm:
            logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
        # diffusion head
        try:
            if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
                model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
        except Exception as e_head:
            logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
        # NEW: full diffusion head state_dict as fallback
        try:
            ph = getattr(model.model, "prediction_head", None)
            if ph is not None and hasattr(ph, "state_dict"):
                sd = ph.state_dict()
                # Saved to both locations so downstream loaders find it either way.
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
                torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
        except Exception as e:
            logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
        # connectors
        try:
            ac_conn = getattr(model.model, "acoustic_connector", None)
            if ac_conn is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
        except Exception as e_ac:
            logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
        try:
            se_conn = getattr(model.model, "semantic_connector", None)
            if se_conn is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
        except Exception as e_se:
            logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
    except Exception as e:
        logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
|
| 924 |
+
|
| 925 |
+
# Enable activation/gradient checkpointing when requested (trades extra
# recompute for lower memory); failure is logged, not fatal.
wants_checkpointing = bool(getattr(training_args, "gradient_checkpointing", False))
if wants_checkpointing:
    try:
        model.gradient_checkpointing_enable()
    except Exception:
        logger.warning("Failed to enable gradient checkpointing on the model.")
|
| 930 |
+
|
| 931 |
+
# =========================================================================
# Load Custom Weights from Checkpoint before resuming training
# =========================================================================
# The custom _save above only writes a <checkpoint>/lora directory, so the
# stock Trainer resume cannot restore these weights — load them manually.
if training_args.do_train and training_args.resume_from_checkpoint:
    checkpoint_path = None
    if isinstance(training_args.resume_from_checkpoint, bool) and training_args.resume_from_checkpoint:
        # --resume_from_checkpoint passed as a bare flag: pick the newest
        # checkpoint dir under output_dir.
        from transformers.trainer_utils import get_last_checkpoint
        checkpoint_path = get_last_checkpoint(training_args.output_dir)
    else:
        checkpoint_path = training_args.resume_from_checkpoint

    if checkpoint_path is not None and os.path.exists(checkpoint_path):
        lora_dir = os.path.join(checkpoint_path, "lora")
        if os.path.exists(lora_dir):
            logger.info(f"*** Resuming custom weights (LoRA, Connectors, Head) from {lora_dir} ***")

            # 1. Load LLM LoRA
            if hasattr(model.model, "language_model"):
                try:
                    from peft import load_peft_weights, set_peft_model_state_dict
                    adapters_weights = load_peft_weights(lora_dir)
                    set_peft_model_state_dict(model.model.language_model, adapters_weights)
                    logger.info("Successfully loaded LLM LoRA weights.")
                except Exception as e:
                    logger.warning(f"Could not load LLM LoRA weights: {e}")

            # 2. Load Diffusion Head
            # NOTE(review): torch.load unpickles arbitrary objects — only
            # resume from trusted checkpoint directories.
            ph_full_path = os.path.join(lora_dir, "diffusion_head_full.bin")
            if os.path.exists(ph_full_path) and hasattr(model.model, "prediction_head"):
                try:
                    # strict=False tolerates key differences (e.g. a
                    # PEFT-wrapped head vs a plain one).
                    model.model.prediction_head.load_state_dict(torch.load(ph_full_path, map_location="cpu"), strict=False)
                    logger.info("Successfully loaded Diffusion Head weights.")
                except Exception as e:
                    logger.warning(f"Failed to load Diffusion Head weights: {e}")

            # 3. Load Acoustic Connector
            ac_path = os.path.join(lora_dir, "acoustic_connector", "pytorch_model.bin")
            if os.path.exists(ac_path) and hasattr(model.model, "acoustic_connector"):
                try:
                    model.model.acoustic_connector.load_state_dict(torch.load(ac_path, map_location="cpu"))
                    logger.info("Successfully loaded Acoustic Connector weights.")
                except Exception as e:
                    logger.warning(f"Failed to load Acoustic Connector weights: {e}")

            # 4. Load Semantic Connector
            se_path = os.path.join(lora_dir, "semantic_connector", "pytorch_model.bin")
            if os.path.exists(se_path) and hasattr(model.model, "semantic_connector"):
                try:
                    model.model.semantic_connector.load_state_dict(torch.load(se_path, map_location="cpu"))
                    logger.info("Successfully loaded Semantic Connector weights.")
                except Exception as e:
                    logger.warning(f"Failed to load Semantic Connector weights: {e}")
        else:
            logger.warning(f"No custom 'lora' directory found inside checkpoint: {checkpoint_path}")
# =========================================================================
|
| 986 |
+
|
| 987 |
+
if training_args.do_train:
    # ----- THE FIX: SET resume_from_checkpoint=False HERE -----
    # The weights are ALREADY loaded via the custom block above.
    # Setting this to False forces Trainer to start counting steps/epochs from 0
    # for your new dataset, preventing it from immediately exiting.
    trainer.train(resume_from_checkpoint=False)

    # Final export of all tuned components under <output_dir>/lora.
    lora_out = os.path.join(training_args.output_dir, "lora")
    os.makedirs(lora_out, exist_ok=True)

    # LLM PEFT (if any)
    lm = getattr(model.model, "language_model", None)
    if hasattr(lm, "save_pretrained"):
        lm.save_pretrained(lora_out)

    # Diffusion head PEFT (if any)
    ph = getattr(model.model, "prediction_head", None)
    if hasattr(ph, "save_pretrained"):
        ph_dir = os.path.join(lora_out, "diffusion_head")
        os.makedirs(ph_dir, exist_ok=True)
        ph.save_pretrained(ph_dir)

    # ALWAYS: full diffusion head state_dict fallback
    try:
        if ph is not None and hasattr(ph, "state_dict"):
            sd = ph.state_dict()
            # Written to both locations so inference loaders find it either way.
            torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
            ph_dir = os.path.join(lora_out, "diffusion_head")
            os.makedirs(ph_dir, exist_ok=True)
            torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
    except Exception as e:
        logger.warning(f"Failed to save FULL diffusion head at end: {e}")

    # Connectors (if trained)
    try:
        ac = getattr(model.model, "acoustic_connector", None)
        if ac is not None:
            ac_dir = os.path.join(lora_out, "acoustic_connector")
            os.makedirs(ac_dir, exist_ok=True)
            torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
    except Exception as e:
        logger.warning(f"Failed to save acoustic_connector: {e}")

    try:
        se = getattr(model.model, "semantic_connector", None)
        if se is not None:
            se_dir = os.path.join(lora_out, "semantic_connector")
            os.makedirs(se_dir, exist_ok=True)
            torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
    except Exception as e:
        logger.warning(f"Failed to save semantic_connector: {e}")

if training_args.do_eval and eval_dataset is not None:
    trainer.evaluate()
|
| 1041 |
+
|
| 1042 |
+
|
| 1043 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|