amir0907 committed on
Commit caa904e · verified · 1 Parent(s): a9a73b9

Upload data via Kaggle

Files changed (34)
  1. VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc +0 -0
  2. VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora10.cpython-312.pyc +0 -0
  3. VibeVoice-finetuning/src/finetune_vibevoice_lora00.py +1005 -0
  4. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc +0 -0
  5. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc +0 -0
  6. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc +0 -0
  7. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc +0 -0
  8. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc +0 -0
  9. VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc +0 -0
  10. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc +0 -0
  11. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc +0 -0
  12. VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc +0 -0
  13. VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-312.pyc +0 -0
  14. VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/dpm_solver.cpython-312.pyc +0 -0
  15. VibeVoice-finetuning/wandb/debug-internal.log +6 -11
  16. VibeVoice-finetuning/wandb/debug.log +21 -23
  17. lor/.gitattributes +67 -0
  18. lor/VibeVoice-finetuning/checkpoint-3600/lora/README.md +202 -0
  19. lor/VibeVoice-finetuning/checkpoint-3600/lora/acoustic_connector/pytorch_model.bin +3 -0
  20. lor/VibeVoice-finetuning/checkpoint-3600/lora/adapter_config.json +31 -0
  21. lor/VibeVoice-finetuning/checkpoint-3600/lora/adapter_model.safetensors +3 -0
  22. lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/config.json +20 -0
  23. lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/diffusion_head_full.bin +3 -0
  24. lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/model.safetensors +3 -0
  25. lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head_full.bin +3 -0
  26. lor/VibeVoice-finetuning/checkpoint-3600/lora/semantic_connector/pytorch_model.bin +3 -0
  27. lor/VibeVoice-finetuning/checkpoint-3600/optimizer.pt +3 -0
  28. lor/VibeVoice-finetuning/checkpoint-3600/pytorch_model.bin +3 -0
  29. lor/VibeVoice-finetuning/checkpoint-3600/rng_state.pth +3 -0
  30. lor/VibeVoice-finetuning/checkpoint-3600/scaler.pt +3 -0
  31. lor/VibeVoice-finetuning/checkpoint-3600/scheduler.pt +3 -0
  32. lor/VibeVoice-finetuning/checkpoint-3600/trainer_state.json +3 -0
  33. preprocessed/.gitattributes +60 -0
  34. preprocessed/preprocessed_batches.pt +3 -0
VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc and b/VibeVoice-finetuning/src/__pycache__/data_vibevoice.cpython-312.pyc differ
 
VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora10.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora10.cpython-312.pyc and b/VibeVoice-finetuning/src/__pycache__/finetune_vibevoice_lora10.cpython-312.pyc differ
 
VibeVoice-finetuning/src/finetune_vibevoice_lora00.py ADDED
@@ -0,0 +1,1005 @@
+ # train_vibevoice_lora.py
+ import os
+ # The line below is left commented out so the process can see all GPUs in DDP mode
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+ import logging
+ import copy
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from datasets import load_dataset, DatasetDict, VerificationMode
+
+ from transformers import (
+     HfArgumentParser,
+     Trainer,
+     set_seed,
+     TrainerCallback,
+ )
+ from transformers import TrainingArguments as HfTrainingArguments
+
+ from peft import LoraConfig, get_peft_model, TaskType
+
+ from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
+
+ from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
+
+ logger = logging.getLogger(__name__)
+
+ # ================== SAMPLE CALLBACK UTILS ==================
+
+ class EmaCallback(TrainerCallback):
+     def __init__(self, attr_path="model.prediction_head", decay=0.999):
+         """
+         attr_path: where the head lives under self.model
+         decay: EMA decay
+         """
+         self.attr_path = attr_path
+         self.decay = float(decay)
+         self.shadow = None
+         self._orig = None  # store non-EMA weights when we swap
+
+     def _get_module(self, model):
+         # DDP fix: reach the underlying model if it is wrapped in DistributedDataParallel
+         mod = model.module if hasattr(model, "module") else model
+         for name in self.attr_path.split('.'):
+             mod = getattr(mod, name)
+         return mod
+
+     def on_train_begin(self, args, state, control, model=None, **kwargs):
+         head = self._get_module(model)
+         # Clone onto each parameter's own device so multi-GPU setups are supported
+         self.shadow = {k: p.detach().clone()
+                        for k, p in head.state_dict().items()}
+
+     def on_step_end(self, args, state, control, model=None, **kwargs):
+         if self.shadow is None: return
+         head = self._get_module(model)
+         with torch.no_grad():
+             for k, v in head.state_dict().items():
+                 target_device = self.shadow[k].device
+                 self.shadow[k].mul_(self.decay).add_(v.detach().to(target_device), alpha=(1.0 - self.decay))
+
+     # ---- Swap helpers ----
+     def _swap_in_ema(self, model):
+         head = self._get_module(model)
+         self._orig = copy.deepcopy(head.state_dict())
+         head.load_state_dict(self.shadow, strict=False)
+
+     def _swap_back(self, model):
+         if self._orig is None: return
+         head = self._get_module(model)
+         head.load_state_dict(self._orig, strict=False)
+         self._orig = None
+
+     def on_evaluate(self, args, state, control, model=None, **kwargs):
+         self._swap_in_ema(model)
+
+     def on_evaluate_end(self, args, state, control, model=None, **kwargs):
+         self._swap_back(model)
+
+     def on_save(self, args, state, control, model=None, **kwargs):
+         self._swap_in_ema(model)
+
+     def on_save_end(self, args, state, control, model=None, **kwargs):
+         self._swap_back(model)
+
+     def on_train_end(self, args, state, control, model=None, **kwargs):
+         self._swap_in_ema(model)
+
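+ # Note (added for clarity): on_step_end above implements the standard EMA update,
+ # shadow = decay * shadow + (1 - decay) * param; with decay=0.999 the shadow
+ # averages over roughly the last 1/(1 - decay) = 1000 optimizer steps.
+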
+ @dataclass
+ class ModelArguments:
+     model_name_or_path: Optional[str] = field(
+         default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
+     )
+     processor_name_or_path: Optional[str] = field(
+         default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
+     )
+     cache_dir: Optional[str] = field(default=None)
+     freeze_acoustic_tokenizer: bool = field(default=True)
+     freeze_semantic_tokenizer: bool = field(default=True)
+     lora_r: int = field(default=8)
+     lora_alpha: int = field(default=32)
+     lora_dropout: float = field(default=0.05)
+     lora_target_modules: str = field(
+         default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
+         metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
+     )
+     lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
+     train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
+     train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
+     layers_to_freeze: Optional[str] = field(
+         default=None,
+         metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
+     )
+
+ @dataclass
+ class DataArguments:
+     dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
+     dataset_config_name: Optional[str] = field(default=None)
+     train_split_name: str = field(default="train")
+     eval_split_name: Optional[str] = field(default="validation")
+     text_column_name: str = field(default="text")
+     audio_column_name: str = field(default="audio")
+     voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
+     eval_split_size: float = field(default=0.0)
+     ignore_verifications: bool = field(default=False)
+     max_length: Optional[int] = field(default=None)
+     train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
+     validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
+     voice_prompt_drop_rate: float = field(
+         default=0.0,
+         metadata={"help": "Probability to drop conditioning voice prompt during training (0.0 keep always, 1.0 drop always)."},
+     )
+
+ @dataclass
+ class CustomTrainingArguments(HfTrainingArguments):
+     ddpm_batch_mul: int = field(default=1)
+     ce_loss_weight: float = field(default=1.0)
+     diffusion_loss_weight: float = field(default=1.0)
+     debug_ce_details: bool = field(default=False)
+     debug_ce_topk: int = field(default=5)
+     debug_ce_max_examples: int = field(default=1)
+     debug_ce_every_n_steps: int = field(default=200)
+     gradient_clipping: bool = field(
+         default=False,
+         metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
+     )
+     debug_save: bool = field(
+         default=False,
+         metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
+     )
+
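+ # Illustrative example (values and paths are placeholders, not from this repo) of the
+ # extra CLI flags these dataclasses add on top of the stock HF TrainingArguments:
+ #   --lora_r 16 --lora_alpha 32 --lora_target_modules q_proj,v_proj \
+ #   --train_jsonl data/train.jsonl --ce_loss_weight 1.0 --diffusion_loss_weight 1.0
+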
+ def build_lora_config(args: ModelArguments) -> LoraConfig:
+     target_modules = [s.strip() for s in args.lora_target_modules.split(",") if s.strip()]
+     return LoraConfig(
+         r=args.lora_r,
+         lora_alpha=args.lora_alpha,
+         lora_dropout=args.lora_dropout,
+         bias="none",
+         task_type=TaskType.CAUSAL_LM,
+         target_modules=target_modules,
+     )
+
+ def build_head_lora_config(args: ModelArguments) -> LoraConfig:
+     target_modules = ["noisy_images_proj", "cond_proj", "gate_proj", "up_proj", "down_proj", "linear"]
+     return LoraConfig(
+         r=args.lora_r,
+         lora_alpha=args.lora_alpha,
+         lora_dropout=args.lora_dropout,
+         bias="none",
+         task_type=TaskType.FEATURE_EXTRACTION,
+         target_modules=target_modules,
+     )
+
+ def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
+     shifted = labels[:, 1:].contiguous()
+     base_mask = attention_mask[:, 1:].contiguous().eq(1) if (attention_mask is not None and attention_mask.numel() > 0) else torch.ones_like(shifted, dtype=torch.bool)
+     label_is_acoustic = acoustic_input_mask[:, 1:].contiguous()
+     final_mask = base_mask & (~label_is_acoustic)
+     out = shifted.clone()
+     out[~final_mask] = pad_id
+     return out
+
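+ # Worked example (added for clarity): for labels [[t0, t1, t2, t3]] the shifted targets
+ # are [t1, t2, t3]; any target position that is padding (attention_mask == 0) or an
+ # acoustic token is set to -100 so CrossEntropyLoss(ignore_index=-100) skips it.
+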
+ def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
+     try:
+         # Handle model access when wrapped by DDP
+         actual_model = model_obj.module if hasattr(model_obj, "module") else model_obj
+         acoustic = getattr(getattr(actual_model, "model", actual_model), "acoustic_tokenizer", None)
+         if acoustic is None or not hasattr(acoustic, "encode"):
+             logger_.warning("No acoustic_tokenizer.encode() found to patch.")
+             return
+         base_encode = acoustic.encode
+
+         def encode_wrapped(*args, **kwargs):
+             out = base_encode(*args, **kwargs)
+             try:
+                 _ = out[0][0]
+                 return out
+             except Exception:
+                 pass
+             if isinstance(out, dict):
+                 for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
+                     if k in out:
+                         return [[out[k]]]
+                 if len(out) > 0:
+                     return [[next(iter(out.values()))]]
+             for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
+                 if hasattr(out, attr):
+                     return [[getattr(out, attr)]]
+             try:
+                 if isinstance(out, torch.Tensor):
+                     return [[out]]
+             except Exception:
+                 pass
+             return [[out]]
+
+         acoustic.encode = encode_wrapped
+         logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
+     except Exception as e:
+         logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
+
+ def main() -> None:
+     parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
+     model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
+     )
+     logger.info("Training/evaluation parameters %s", training_args)
+     set_seed(training_args.seed)
+
+     # Get the current local rank so each DDP process is assigned the right GPU
+     local_rank = int(os.environ.get("LOCAL_RANK", -1))
+     device_map = {"": local_rank} if local_rank != -1 else None
+
+     # Configure gradient clipping
+     if not getattr(training_args, "gradient_clipping", False):
+         if hasattr(training_args, "max_grad_norm"):
+             training_args.max_grad_norm = 0.0
+         logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
+     else:
+         if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
+             training_args.max_grad_norm = 1.0
+         logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")
+
+     # Load processor
+     processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
+     if processor_path is None:
+         raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
+     processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
+
+     # Required special tokens
+     tok = processor.tokenizer
+     for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
+         if not hasattr(tok, required) or getattr(tok, required) is None:
+             raise RuntimeError(f"Tokenizer missing required special id: {required}")
+
+     # Load model (placed on GPUs via the device_map computed above)
+     if model_args.model_name_or_path is None:
+         raise ValueError("--model_name_or_path is required to load VibeVoice base model")
+     dtype = torch.float32
+     if training_args.bf16:
+         dtype = torch.bfloat16
+     elif getattr(training_args, "fp16", False):
+         dtype = torch.float16
+     model = VibeVoiceForConditionalGeneration.from_pretrained(
+         model_args.model_name_or_path,
+         torch_dtype=dtype,
+         device_map=device_map,
+     )
+
+     _patch_acoustic_encode_for_legacy_indexing(model, logger)
+     processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
+
+     # Diagnostics: LM head tie
+     try:
+         in_emb_mod = model.get_input_embeddings()
+         out_emb_mod = model.get_output_embeddings()
+         in_w = getattr(in_emb_mod, "weight", None)
+         out_w = getattr(out_emb_mod, "weight", None)
+         shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
+         values_equal = False
+         if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
+             try:
+                 values_equal = bool(torch.allclose(in_w, out_w))
+             except Exception:
+                 values_equal = False
+         try:
+             tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
+         except Exception:
+             tie_cfg = getattr(model.config, "tie_word_embeddings", None)
+         logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
+         if out_w is not None:
+             logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
+     except Exception as e:
+         logger.warning(f"LM head tie diagnostics failed: {e}")
+
+     # Hard-tie LM head
+     try:
+         emb_module = model.get_input_embeddings()
+         head_module = model.get_output_embeddings()
+         if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
+             if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
+                 with torch.no_grad():
+                     head_module.weight = emb_module.weight
+                 logger.info("Force-tied LM head weight to input embeddings (pointer share).")
+     except Exception as e:
+         logger.warning(f"Force-tie of LM head failed: {e}")
+
+     # Validate special IDs (info logs only)
+     try:
+         special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
+         try:
+             vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
+         except Exception:
+             vocab_size = 0
+         in_emb_mod = model.get_input_embeddings()
+         out_emb_mod = model.get_output_embeddings()
+         in_w = getattr(in_emb_mod, "weight", None)
+         out_w = getattr(out_emb_mod, "weight", None)
+         for name in special_names:
+             val = getattr(tok, name, None)
+             exists = (val is not None)
+             in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
+             equal_row = None
+             if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
+                 try:
+                     equal_row = bool(torch.allclose(in_w[val], out_w[val]))
+                 except Exception:
+                     equal_row = False
+             decoded_str = None
+             if exists and isinstance(val, int):
+                 try:
+                     decoded_str = tok.decode([val])
+                 except Exception:
+                     try:
+                         decoded_str = tok.convert_ids_to_tokens(val)
+                     except Exception:
+                         decoded_str = "<decode_failed>"
+             logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
+     except Exception as e:
+         logger.warning(f"Special token ID/row validation failed: {e}")
+
+     # Quick tokenizer diagnostics (optional)
+     try:
+         logger.info("=== TOKENIZER DIAGNOSTICS ===")
+         logger.info(f"Tokenizer class: {type(tok).__name__}")
+         logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
+         # tiny CE smoke test
+         with torch.no_grad():
+             simple_text = "The cat sat on the mat."
+             simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
+             simple_mask = torch.ones_like(simple_ids)
+             x = model.get_input_embeddings()(simple_ids)
+             outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
+             logits = model.lm_head(outputs.last_hidden_state)
+             shift_logits = logits[:, :-1, :].contiguous()
+             shift_labels = simple_ids[:, 1:].contiguous()
+             ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
+             logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
+     except Exception as e:
+         logger.warning(f"Tokenizer diagnostics failed: {e}")
+
+     # Disable cache during training
+     if hasattr(model.config, "use_cache") and training_args.do_train:
+         model.config.use_cache = False
+
+     # Freeze tokenizers
+     if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
+         for p in model.model.acoustic_tokenizer.parameters():
+             p.requires_grad = False
+     if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
+         for p in model.model.semantic_tokenizer.parameters():
+             p.requires_grad = False
+
+     # LoRA wrap LLM (optional)
+     lora_cfg = build_lora_config(model_args)
+     tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
+     skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
+     if not skip_lm_lora:
+         model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
+     else:
+         logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")
+
+     try:
+         model.tie_weights()
+     except Exception:
+         pass
+
+     # Freeze all then enable trainable subsets
+     for _, p in model.named_parameters():
+         p.requires_grad = False
+
+     try:
+         for n, p in model.model.language_model.named_parameters():
+             if "lora_A" in n or "lora_B" in n:
+                 p.requires_grad = True
+     except Exception:
+         logger.warning("Could not re-enable LoRA params on language_model.")
+
+     # Diffusion head LoRA wrapping (optional)
+     if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
+         class _HeadForwardShim(nn.Module):
+             def __init__(self, base: nn.Module):
+                 super().__init__()
+                 self.base = base
+
+             def forward(self, *args, **kwargs):
+                 if len(args) >= 3:
+                     noisy_images, timesteps, condition = args[:3]
+                 else:
+                     noisy_images = kwargs.get("noisy_images")
+                     timesteps = kwargs.get("timesteps")
+                     condition = kwargs.get("condition")
+                 return self.base(noisy_images, timesteps, condition)
+
+         try:
+             shim = _HeadForwardShim(model.model.prediction_head)
+             model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
+             for n, p in model.model.prediction_head.named_parameters():
+                 if "lora_A" in n or "lora_B" in n:
+                     p.requires_grad = True
+         except Exception as e:
+             logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
+
+     # Train full diffusion head (optional)
+     if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
+         for p in model.model.prediction_head.parameters():
+             p.requires_grad = True
+
+     # Freeze diffusion head layers (optional)
+     if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
+         head_params = list(model.model.prediction_head.named_parameters())
+         try:
+             indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
+             frozen_count = 0
+             for i, (name, param) in enumerate(head_params):
+                 if i in indices_to_freeze:
+                     param.requires_grad = False
+                     frozen_count += 1
+                     logger.info(f"Froze layer [{i}]: {name}")
+             logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
+         except Exception as e:
+             logger.error(f"Could not parse --layers_to_freeze: {e}")
+             raise
+
+     # Connectors
+     if getattr(model_args, "train_connectors", False):
+         if hasattr(model.model, "acoustic_connector"):
+             for p in model.model.acoustic_connector.parameters():
+                 p.requires_grad = True
+         if hasattr(model.model, "semantic_connector"):
+             for p in model.model.semantic_connector.parameters():
+                 p.requires_grad = True
+     else:
+         if hasattr(model.model, "acoustic_connector"):
+             for p in model.model.acoustic_connector.parameters():
+                 p.requires_grad = False
+         if hasattr(model.model, "semantic_connector"):
+             for p in model.model.semantic_connector.parameters():
+                 p.requires_grad = False
+
+     # Freeze embedding + head
+     try:
+         emb = model.get_input_embeddings()
+         if hasattr(emb, "weight"):
+             emb.weight.requires_grad_(False)
+         head = model.get_output_embeddings()
+         if head is not None and hasattr(head, "weight"):
+             head.weight.requires_grad_(False)
+     except Exception:
+         pass
+
+     # Diagnostics
+     def _sum_params(named_iter):
+         return sum(p.numel() for _, p in named_iter if p.requires_grad)
+     try:
+         lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
+         pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
+         ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
+         se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
+         total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
+         logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
+         logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
+     except Exception:
+         pass
+
+     # Preprocessed data classes
+     class PreprocessedBatchDataset:
+         def __init__(self, preprocessed_file: str):
+             self.data = torch.load(preprocessed_file, map_location='cpu')
+             logger.info(f"Loaded {len(self.data)} preprocessed batches from {preprocessed_file}")
+
+         def __len__(self):
+             return len(self.data)
+
+         def __getitem__(self, idx):
+             batch = self.data[idx]
+             result = {}
+             for k, v in batch.items():
+                 result[k] = v  # tensors and non-tensors are passed through unchanged
+             return result
+
+     class PreprocessedBatchSubset:
+         def __init__(self, dataset: 'PreprocessedBatchDataset', indices: List[int]):
+             self.dataset = dataset
+             self.indices = indices
+
+         def __len__(self):
+             return len(self.indices)
+
+         def __getitem__(self, idx):
+             actual_idx = self.indices[idx]
+             return self.dataset[actual_idx]
+
+     class PreprocessedBatchCollator:
+         def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
+             if not batch:
+                 return {}
+             result = {}
+             for key in batch[0].keys():
+                 tensors = [b[key] for b in batch if b[key] is not None]
+                 if tensors and isinstance(tensors[0], torch.Tensor):
+                     result[key] = torch.cat(tensors, dim=0)
+                 else:
+                     result[key] = tensors[0] if tensors else None
+             return result
+
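+     # Note (added for clarity): each preprocessed item is already a fully padded batch, so
+     # this collator simply concatenates same-named tensors along dim 0 and passes the first
+     # non-None value through for non-tensor fields.
+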
+     # Datasets
+     preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
+     preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
+
+     if os.path.exists(preprocessed_file):
+         logger.info(f"Loading preprocessed data from {preprocessed_file}")
+         preprocessed_data = PreprocessedBatchDataset(preprocessed_file)
+
+         train_dataset = preprocessed_data
+         eval_dataset = None
+
+         if training_args.do_eval and data_args.eval_split_size and data_args.eval_split_size > 0 and len(preprocessed_data) > 1:
+             num_eval = max(1, int(len(preprocessed_data) * data_args.eval_split_size))
+             num_train = len(preprocessed_data) - num_eval
+             indices = list(range(len(preprocessed_data)))
+             import random
+             random.Random(training_args.seed).shuffle(indices)
+             train_indices = indices[:num_train]
+             eval_indices = indices[num_train:]
+             train_dataset = PreprocessedBatchSubset(preprocessed_data, train_indices)
+             eval_dataset = PreprocessedBatchSubset(preprocessed_data, eval_indices)
+     else:
+         logger.info(f"Preprocessed data not found at {preprocessed_file}, loading from raw JSONL/HF datasets")
+         verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
+         if data_args.train_jsonl is not None:
+             data_files: Dict[str, str] = {"train": data_args.train_jsonl}
+             if data_args.validation_jsonl is not None:
+                 data_files["validation"] = data_args.validation_jsonl
+             raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
+         else:
+             if data_args.dataset_name is None:
+                 raise ValueError("Provide --dataset_name (HF datasets) or use --train_jsonl/--validation_jsonl for local files.")
+             raw = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 verification_mode=verification_mode,
+                 cache_dir=model_args.cache_dir,
+             )
+         train_ds = raw[data_args.train_split_name]
+         eval_ds = None
+         if training_args.do_eval:
+             if data_args.eval_split_name and data_args.eval_split_name in raw:
+                 eval_ds = raw[data_args.eval_split_name]
+             elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
+                 split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
+                 train_ds, eval_ds = split["train"], split["test"]
+
+         train_dataset = VibeVoiceDataset(
+             train_ds,
+             text_column=data_args.text_column_name,
+             audio_column=data_args.audio_column_name,
+             voice_prompts_column=data_args.voice_prompts_column_name,
+         )
+         eval_dataset = None
+         if eval_ds is not None:
+             eval_dataset = VibeVoiceDataset(
+                 eval_ds,
+                 text_column=data_args.text_column_name,
+                 audio_column=data_args.audio_column_name,
+                 voice_prompts_column=data_args.voice_prompts_column_name,
+             )
+
+     # Ratios/dims from processor+model
+     speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
+     semantic_dim = getattr(model.config, "semantic_vae_dim", None)
+     if semantic_dim is None:
+         try:
+             semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
+         except Exception:
+             semantic_dim = 128
+
+     compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
+
+     if os.path.exists(preprocessed_file):
+         data_collator = PreprocessedBatchCollator()
+     else:
+         data_collator = VibeVoiceCollator(
+             processor=processor,
+             max_length=data_args.max_length,
+             speech_compress_ratio=speech_compress_ratio,
+             semantic_vae_dim=semantic_dim,
+             compute_semantics=compute_semantics_flag,
+             debug_checks=False,
+             voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
+         )
+
+     class LoRADebugCallback(TrainerCallback):
+         def __init__(self, log_every_n_steps: int = 50):
+             self.log_every_n_steps = max(1, int(log_every_n_steps))
+             self.prev_param_norms: Dict[str, float] = {}
+             self.lora_param_names: List[str] = []
+
+         def on_train_begin(self, args, state, control, model=None, **kwargs):
+             try:
+                 if model is None:
+                     return
+                 # Safe model access under DDP
+                 actual_model = model.module if hasattr(model, "module") else model
+                 named: Dict[str, torch.nn.Parameter] = dict(actual_model.named_parameters())
+                 self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
+                 for n in self.lora_param_names:
+                     p = named[n]
+                     self.prev_param_norms[n] = float(p.data.norm().item())
+                 total = len(self.lora_param_names)
+                 req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
+                 num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
+                 num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
+                 zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
+                 logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
+                 if total == 0:
+                     logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
+                 if req_grad != total:
+                     logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
+             except Exception as e:
+                 logger.warning(f"LoRA debug (on_train_begin) failed: {e}")
+
+         def on_step_end(self, args, state, control, model=None, **kwargs):
+             try:
+                 if model is None or len(self.lora_param_names) == 0:
+                     return
+                 step = int(getattr(state, "global_step", 0) or 0)
+                 if step % self.log_every_n_steps != 0 and step != 1:
+                     return
+
+                 actual_model = model.module if hasattr(model, "module") else model
+                 named: Dict[str, torch.nn.Parameter] = dict(actual_model.named_parameters())
+                 changed_A = 0
+                 changed_B = 0
+                 zero_B = 0
+                 eps = 1e-12
+                 for n in self.lora_param_names:
+                     p = named.get(n, None)
+                     if p is None:
+                         continue
+                     prev = self.prev_param_norms.get(n, 0.0)
+                     curr = float(p.data.norm().item())
+                     if "lora_A" in n and abs(curr - prev) > eps:
+                         changed_A += 1
+                     if "lora_B" in n:
+                         if abs(curr - prev) > eps:
+                             changed_B += 1
+                         if curr == 0.0:
+                             zero_B += 1
+                     self.prev_param_norms[n] = curr
+                 total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
+                 total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
+                 logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
+             except Exception as e:
+                 logger.warning(f"LoRA debug (on_step_end) failed: {e}")
+
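+     # Note (added for clarity): PEFT zero-initializes lora_B, so adapters start as a no-op;
+     # a falling lora_B_zero count in the logs above is the sign the adapters are learning.
+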
+     class VibeVoiceTrainer(Trainer):
+         def compute_loss(self, model, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
+             # Unwrap the DDP wrapper to reach the model's own attributes (avoids DDP errors)
+             actual_model = model.module if hasattr(model, "module") else model
+
+             labels = inputs.get("input_ids")
+             attention_mask = inputs.get("attention_mask")
+             acoustic_input_mask = inputs.get("acoustic_input_mask")
+
+             # Ensure semantic tensors exist and have correct dtype/device
+             sem = inputs.get("speech_semantic_tensors", None)
+             try:
+                 target_dtype = next(actual_model.model.semantic_connector.parameters()).dtype
+             except Exception:
+                 target_dtype = actual_model.get_input_embeddings().weight.dtype
+
+             if sem is None:
+                 sm = inputs.get("speech_masks")
+                 if sm is not None:
+                     zeros = torch.zeros(
+                         sm.size(0), sm.size(1),
+                         getattr(actual_model.config, "semantic_vae_dim", 128),
+                         dtype=target_dtype,
+                         device=sm.device,
+                     )
+                     inputs["speech_semantic_tensors"] = zeros
+             else:
+                 if isinstance(sem, torch.Tensor):
+                     inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)
+
+             outputs = model(
+                 input_ids=inputs.get("input_ids"),
+                 attention_mask=attention_mask,
+                 speech_tensors=inputs.get("speech_tensors"),
+                 speech_masks=inputs.get("speech_masks"),
+                 speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
+                 acoustic_input_mask=acoustic_input_mask,
+                 acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
+                 speeches_loss_input=inputs.get("speeches_loss_input"),
+                 ddpm_batch_mul=training_args.ddpm_batch_mul,
+             )
+
+             # Invariants: token/latent selection equality across views (warn, don't assert)
+             try:
+                 al_mask = inputs.get("acoustic_loss_mask")
+                 sp_masks = inputs.get("speech_masks")
+                 sp_loss_sel = inputs.get("speeches_loss_input")
+                 num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
+                 num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
+                 num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
+                 num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
+                 self.log({
+                     "debug/num_tok_total": float(num_tok_total),
+                     "debug/num_tok_loss": float(num_tok_loss),
+                     "debug/num_lat_total": float(num_lat_total),
+                     "debug/num_lat_loss": float(num_lat_loss),
+                 })
+                 if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
+                     if num_tok_loss != num_lat_loss:
+                         logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
+             except Exception:
+                 pass
+
+             # CE loss
+             logits = outputs.logits
+             ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
+             shift_logits = logits[:, :-1, :].contiguous()
+             loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+             ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))
+
+             # Optional CE diagnostics
+             try:
+                 self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
+             except Exception as e:
+                 logger.warning(f"Failed invoking CE debug: {e}")
+
+             # Diffusion loss
+             diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
+             total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss
+
+             # Logs
+             try:
+                 prefix = "train" if model.training else "eval"
+                 self.log({
+                     f"{prefix}/ce_loss": ce_loss.detach().item(),
+                     f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
+                 })
+                 if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
+                     lr_val = self.optimizer.param_groups[0].get("lr", None)
+                     if lr_val is not None:
+                         self.log({"train/learning_rate_real": float(lr_val)})
+             except Exception:
+                 pass
+
+             return (total, outputs) if return_outputs else total
+
+         def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
+             try:
+                 if not getattr(training_args, "debug_ce_details", False):
+                     return
+                 step = int(getattr(self.state, "global_step", 0) or 0)
+                 every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
+                 if not (step <= 1 or (step % every_n == 0)):
+                     return
+
+                 with torch.no_grad():
+                     vocab = shift_logits.size(-1)
+                     per_token_loss = F.cross_entropy(
+                         shift_logits.view(-1, vocab),
+                         ce_labels.view(-1),
+                         reduction="none",
+                         ignore_index=-100,
+                     ).view_as(ce_labels)
+
+                     valid_mask = ce_labels.ne(-100)
+                     num_valid = int(valid_mask.sum().item())
+                     avg_loss = float((per_token_loss[valid_mask].mean().item())) if num_valid > 0 else float("nan")
+
+                     per_ex_avgs = []
+                     max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
+                     B = ce_labels.size(0)
+                     for b in range(min(B, max_examples)):
+                         vb = valid_mask[b]
+                         if int(vb.sum().item()) > 0:
+                             per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
+                         else:
+                             per_ex_avgs.append(float("nan"))
+                     logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x, 4) if x == x else None for x in per_ex_avgs]}")
+             except Exception as e:
+                 logger.warning(f"CE detailed debug failed: {e}")
+
+         # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------
+
+         def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
+             # Save only on the main process to avoid file clobbering/corruption under DDP
+             if not self.is_world_process_zero():
+                 return
+
+             try:
+                 actual_model = self.model.module if hasattr(self.model, "module") else self.model
+                 target_dir = output_dir or self.args.output_dir
+                 lora_out = os.path.join(target_dir, "lora")
+                 os.makedirs(lora_out, exist_ok=True)
+
+                 # --- LLM PEFT adapters (if LoRA-wrapped) ---
+                 language_model = getattr(actual_model.model, "language_model", None)
+                 if hasattr(language_model, "save_pretrained"):
+                     language_model.save_pretrained(lora_out)
+
+                 # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
+                 pred_head = getattr(actual_model.model, "prediction_head", None)
+                 if hasattr(pred_head, "save_pretrained"):
+                     ph_dir = os.path.join(lora_out, "diffusion_head")
+                     os.makedirs(ph_dir, exist_ok=True)
+                     pred_head.save_pretrained(ph_dir)
+
+                 # --- ALWAYS save FULL diffusion head state_dict for fallback ---
+                 if pred_head is not None and hasattr(pred_head, "state_dict"):
+                     sd = pred_head.state_dict()
+                     torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
+                     ph_dir = os.path.join(lora_out, "diffusion_head")
+                     os.makedirs(ph_dir, exist_ok=True)
+                     torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
+
+                 # --- Connectors (plain state_dicts) ---
+                 ac = getattr(actual_model.model, "acoustic_connector", None)
+                 if ac is not None:
+                     ac_dir = os.path.join(lora_out, "acoustic_connector")
+                     os.makedirs(ac_dir, exist_ok=True)
+                     torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
+
+                 se = getattr(actual_model.model, "semantic_connector", None)
+                 if se is not None:
+                     se_dir = os.path.join(lora_out, "semantic_connector")
+                     os.makedirs(se_dir, exist_ok=True)
+                     torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
+
+             except Exception as e:
+                 logger.warning(f"Failed to save LoRA assets: {e}")
+
+     # ------------- Build the Trainer -------------
+
+     # Resolve which adapters to apply in samples
+     # Note: the device is handled automatically inside the callback
+     ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999)
+
+     # --- CRITICAL FIX: CAST TRAINABLE PARAMS TO FP32 ---
+     # This prevents 'ValueError: Attempting to unscale FP16 gradients'
+     if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
+         if training_args.local_rank in [-1, 0]:
+             print('>>> INFO: Enforcing float32 for trainable parameters (LoRA/Head) to fix GradScaler.')
+         # Under DDP the model may be wrapped, so operate on actual_model
+         actual_model = model.module if hasattr(model, "module") else model
+         for name, param in actual_model.named_parameters():
+             if param.requires_grad:
+                 param.data = param.data.to(torch.float32)
+     # ---------------------------------------------------
+
+     trainer = VibeVoiceTrainer(
+         model=model,
+         args=training_args,
+         train_dataset=train_dataset,
+         eval_dataset=eval_dataset,
+         data_collator=data_collator,
+         callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=(int(getattr(training_args, "logging_steps", 50) or 50)))],
+     )
+
+     # Optional debug pre-training save
+     if getattr(training_args, "debug_save", False):
+         if trainer.is_world_process_zero():
+             try:
+                 actual_model = model.module if hasattr(model, "module") else model
+                 debug_dir = os.path.join(training_args.output_dir, "debug_initial")
+                 lora_out = os.path.join(debug_dir, "lora")
+                 os.makedirs(lora_out, exist_ok=True)
+                 logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
+                 # language model adapters / base
+                 try:
+                     if hasattr(actual_model.model.language_model, "save_pretrained"):
+                         actual_model.model.language_model.save_pretrained(lora_out)
+                 except Exception as e_lm:
+                     logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
+                 # diffusion head
+                 try:
+                     if hasattr(actual_model.model, "prediction_head") and hasattr(actual_model.model.prediction_head, "save_pretrained"):
+                         actual_model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
+                 except Exception as e_head:
+                     logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
+                 # NEW: full diffusion head state_dict as fallback
+                 try:
+                     ph = getattr(actual_model.model, "prediction_head", None)
+                     if ph is not None and hasattr(ph, "state_dict"):
+                         sd = ph.state_dict()
+                         torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
+                         os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
+                         torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
+                 except Exception as e:
+                     logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
+                 # connectors
+                 try:
+                     ac_conn = getattr(actual_model.model, "acoustic_connector", None)
+                     if ac_conn is not None:
+                         ac_dir = os.path.join(lora_out, "acoustic_connector")
+                         os.makedirs(ac_dir, exist_ok=True)
+                         torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
+                 except Exception as e_ac:
+                     logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
+                 try:
+                     se_conn = getattr(actual_model.model, "semantic_connector", None)
+                     if se_conn is not None:
+                         se_dir = os.path.join(lora_out, "semantic_connector")
+                         os.makedirs(se_dir, exist_ok=True)
+                         torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
+                 except Exception as e_se:
+                     logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
+             except Exception as e:
+                 logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")
+
+     if getattr(training_args, "gradient_checkpointing", False):
+         try:
+             model.gradient_checkpointing_enable()
+         except Exception:
+             logger.warning("Failed to enable gradient checkpointing on the model.")
+
+     if training_args.do_train:
+         trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
+
+     if trainer.is_world_process_zero():
+         actual_model = model.module if hasattr(model, "module") else model
+         lora_out = os.path.join(training_args.output_dir, "lora")
+         os.makedirs(lora_out, exist_ok=True)
+
+         # LLM PEFT (if any)
+         lm = getattr(actual_model.model, "language_model", None)
+         if hasattr(lm, "save_pretrained"):
+             lm.save_pretrained(lora_out)
+
+         # Diffusion head PEFT (if any)
+         ph = getattr(actual_model.model, "prediction_head", None)
+         if hasattr(ph, "save_pretrained"):
+             ph_dir = os.path.join(lora_out, "diffusion_head")
+             os.makedirs(ph_dir, exist_ok=True)
+             ph.save_pretrained(ph_dir)
+
+         # ALWAYS: full diffusion head state_dict fallback
+         try:
+             if ph is not None and hasattr(ph, "state_dict"):
+                 sd = ph.state_dict()
+                 torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
+                 ph_dir = os.path.join(lora_out, "diffusion_head")
+                 os.makedirs(ph_dir, exist_ok=True)
+                 torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
+         except Exception as e:
+             logger.warning(f"Failed to save FULL diffusion head at end: {e}")
+
+         # Connectors (if trained)
+         try:
+             ac = getattr(actual_model.model, "acoustic_connector", None)
+             if ac is not None:
+                 ac_dir = os.path.join(lora_out, "acoustic_connector")
+                 os.makedirs(ac_dir, exist_ok=True)
+                 torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
+         except Exception as e:
+             logger.warning(f"Failed to save acoustic_connector: {e}")
+
+         try:
+             se = getattr(actual_model.model, "semantic_connector", None)
+             if se is not None:
+                 se_dir = os.path.join(lora_out, "semantic_connector")
+                 os.makedirs(se_dir, exist_ok=True)
+                 torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
+         except Exception as e:
+             logger.warning(f"Failed to save semantic_connector: {e}")
+
+     if training_args.do_eval and eval_dataset is not None:
+         trainer.evaluate()
+
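+ # Example launch (illustrative; paths, GPU count, and data files are placeholders):
+ #   torchrun --nproc_per_node=2 finetune_vibevoice_lora00.py \
+ #     --model_name_or_path /path/to/VibeVoice --train_jsonl data/train.jsonl \
+ #     --output_dir outputs/run1 --do_train --bf16 --ddpm_batch_mul 1
+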
+ if __name__ == "__main__":
+     main()
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/__init__.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/__init__.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/processor/__pycache__/vibevoice_tokenizer_processor.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/__init__.cpython-312.pyc differ
 
VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/dpm_solver.cpython-312.pyc CHANGED
Binary files a/VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/dpm_solver.cpython-312.pyc and b/VibeVoice-finetuning/src/vibevoice/schedule/__pycache__/dpm_solver.cpython-312.pyc differ
 
VibeVoice-finetuning/wandb/debug-internal.log CHANGED
@@ -1,11 +1,6 @@
- {"time":"2026-02-23T17:38:02.523684863Z","level":"INFO","msg":"stream: starting","core version":"0.25.0"}
- {"time":"2026-02-23T17:38:02.66532857Z","level":"INFO","msg":"stream: created new stream","id":"fg8jq8cq"}
- {"time":"2026-02-23T17:38:02.665424984Z","level":"INFO","msg":"handler: started","stream_id":"fg8jq8cq"}
- {"time":"2026-02-23T17:38:02.665526464Z","level":"INFO","msg":"stream: started","id":"fg8jq8cq"}
- {"time":"2026-02-23T17:38:02.665548899Z","level":"INFO","msg":"writer: started","stream_id":"fg8jq8cq"}
- {"time":"2026-02-23T17:38:02.665551744Z","level":"INFO","msg":"sender: started","stream_id":"fg8jq8cq"}
- {"time":"2026-02-23T19:01:20.602748254Z","level":"INFO","msg":"stream: closing","id":"fg8jq8cq"}
- {"time":"2026-02-23T19:01:21.129470891Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
- {"time":"2026-02-23T19:01:21.200065311Z","level":"INFO","msg":"handler: closed","stream_id":"fg8jq8cq"}
- {"time":"2026-02-23T19:01:21.200283137Z","level":"INFO","msg":"sender: closed","stream_id":"fg8jq8cq"}
- {"time":"2026-02-23T19:01:21.200320797Z","level":"INFO","msg":"stream: closed","id":"fg8jq8cq"}
 
+ {"time":"2026-02-18T14:42:37.394199692Z","level":"INFO","msg":"stream: starting","core version":"0.24.2"}
+ {"time":"2026-02-18T14:42:37.612237616Z","level":"INFO","msg":"stream: created new stream","id":"a0h99ykt"}
+ {"time":"2026-02-18T14:42:37.614905768Z","level":"INFO","msg":"handler: started","stream_id":"a0h99ykt"}
+ {"time":"2026-02-18T14:42:37.615126264Z","level":"INFO","msg":"stream: started","id":"a0h99ykt"}
+ {"time":"2026-02-18T14:42:37.61519901Z","level":"INFO","msg":"writer: started","stream_id":"a0h99ykt"}
+ {"time":"2026-02-18T14:42:37.615229541Z","level":"INFO","msg":"sender: started","stream_id":"a0h99ykt"}
VibeVoice-finetuning/wandb/debug.log CHANGED
@@ -1,24 +1,22 @@
- 2026-02-23 17:38:02,076 INFO MainThread:240 [wandb_setup.py:_flush():81] Current SDK version is 0.25.0
- 2026-02-23 17:38:02,076 INFO MainThread:240 [wandb_setup.py:_flush():81] Configure stats pid to 240
- 2026-02-23 17:38:02,076 INFO MainThread:240 [wandb_setup.py:_flush():81] Loading settings from environment variables
- 2026-02-23 17:38:02,076 INFO MainThread:240 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /kaggle/working/VibeVoice-finetuning/wandb/run-20260223_173802-fg8jq8cq/logs/debug.log
- 2026-02-23 17:38:02,077 INFO MainThread:240 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /kaggle/working/VibeVoice-finetuning/wandb/run-20260223_173802-fg8jq8cq/logs/debug-internal.log
- 2026-02-23 17:38:02,077 INFO MainThread:240 [wandb_init.py:init():844] calling init triggers
- 2026-02-23 17:38:02,077 INFO MainThread:240 [wandb_init.py:init():849] wandb.init called with sweep_config: {}
  config: {'_wandb': {}}
- 2026-02-23 17:38:02,077 INFO MainThread:240 [wandb_init.py:init():892] starting backend
- 2026-02-23 17:38:02,512 INFO MainThread:240 [wandb_init.py:init():895] sending inform_init request
- 2026-02-23 17:38:02,520 INFO MainThread:240 [wandb_init.py:init():903] backend started and connected
- 2026-02-23 17:38:02,524 INFO MainThread:240 [wandb_init.py:init():973] updated telemetry
- 2026-02-23 17:38:02,525 INFO MainThread:240 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout
- 2026-02-23 17:38:03,069 INFO MainThread:240 [wandb_init.py:init():1042] starting run threads in backend
- 2026-02-23 17:38:03,749 INFO MainThread:240 [wandb_run.py:_console_start():2524] atexit reg
- 2026-02-23 17:38:03,749 INFO MainThread:240 [wandb_run.py:_redirect():2373] redirect: wrap_raw
- 2026-02-23 17:38:03,749 INFO MainThread:240 [wandb_run.py:_redirect():2442] Wrapping output streams.
- 2026-02-23 17:38:03,749 INFO MainThread:240 [wandb_run.py:_redirect():2465] Redirects installed.
- 2026-02-23 17:38:03,755 INFO MainThread:240 [wandb_init.py:init():1082] run started, returning control to user process
- 2026-02-23 17:38:03,758 INFO MainThread:240 [wandb_run.py:_config_callback():1403] config_cb None None {'acoustic_tokenizer_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_acoustic_tokenizer', 'channels': 1, 'corpus_normalize': 0.0, 'causal': True, 'vae_dim': 64, 'fix_std': 0.5, 'std_dist_type': 'gaussian', 'conv_norm': 'none', 'pad_mode': 'constant', 'layernorm_eps': 1e-05, 'disable_last_norm': True, 'layernorm': 'RMSNorm', 'layernorm_elementwise_affine': True, 'conv_bias': True, 'layer_scale_init_value': 1e-06, 'weight_init_value': 0.01, 'mixer_layer': 'depthwise_conv', 'encoder_n_filters': 32, 'encoder_ratios': [8, 5, 5, 4, 2, 2], 'encoder_depths': '3-3-3-3-3-3-8', 'decoder_ratios': [8, 5, 5, 4, 2, 2], 'decoder_n_filters': 32, 'decoder_depths': None}, 'semantic_tokenizer_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 
'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_semantic_tokenizer', 'channels': 1, 'corpus_normalize': 0.0, 'causal': True, 'vae_dim': 128, 'fix_std': 0, 'std_dist_type': 'none', 'conv_norm': 'none', 'pad_mode': 'constant', 'layernorm_eps': 1e-05, 'disable_last_norm': True, 'layernorm': 'RMSNorm', 'layernorm_elementwise_affine': True, 'conv_bias': True, 'layer_scale_init_value': 1e-06, 'weight_init_value': 0.01, 'mixer_layer': 'depthwise_conv', 'encoder_n_filters': 32, 'encoder_ratios': [8, 5, 5, 4, 2, 2], 'encoder_depths': '3-3-3-3-3-3-8'}, 'decoder_config': {'vocab_size': 151936, 'max_position_embeddings': 65536, 'hidden_size': 1536, 'intermediate_size': 8960, 'num_hidden_layers': 28, 'num_attention_heads': 12, 'use_sliding_window': False, 'sliding_window': None, 'max_window_layers': 28, 'num_key_value_heads': 2, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-06, 'use_cache': True, 'rope_theta': 1000000.0, 'rope_scaling': None, 'attention_dropout': 0.0, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'qwen2'}, 'diffusion_head_config': {'hidden_size': 1536, 'head_layers': 4, 'head_ffn_ratio': 3.0, 'rms_norm_eps': 1e-05, 'latent_size': 64, 'speech_vae_dim': 64, 'prediction_type': 'v_prediction', 'diffusion_type': 'ddpm', 'ddpm_num_steps': 1000, 'ddpm_num_inference_steps': 20, 'ddpm_beta_schedule': 'cosine', 'ddpm_batch_mul': 4, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 
'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_diffusion_head'}, 'acoustic_vae_dim': 64, 'semantic_vae_dim': 128, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['VibeVoiceForConditionalGeneration'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'microsoft/VibeVoice-1.5B', '_attn_implementation_autoset': True, 'transformers_version': '4.51.3', 'model_type': 'vibevoice', 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': True, 'do_eval': False, 'do_predict': False, 'eval_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 14, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 0.6, 'num_train_epochs': 5.0, 'max_steps': -1, 'lr_scheduler_type': 'cosine', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.06, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Feb23_17-37-19_deb67820e34b', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 10, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 1450, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': 
False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 100.0, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'tp_size': 0, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False, 'average_tokens_across_devices': False, 'ddpm_batch_mul': 1, 'ce_loss_weight': 0.3, 'diffusion_loss_weight': 1.9, 'debug_ce_details': False, 'debug_ce_topk': 5, 'debug_ce_max_examples': 1, 'debug_ce_every_n_steps': 200, 'gradient_clipping': True, 'debug_save': False}
21
- 2026-02-23 17:38:03,769 INFO MainThread:240 [wandb_config.py:__setitem__():155] [no run ID] config set model/num_parameters = 2777881057 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7c7fc7c783e0>>
22
- 2026-02-23 17:38:03,770 INFO MainThread:240 [wandb_run.py:_config_callback():1403] config_cb model/num_parameters 2777881057 None
23
- 2026-02-23 19:01:20,602 INFO wandb-AsyncioManager-main:240 [service_client.py:_forward_responses():134] Reached EOF.
24
- 2026-02-23 19:01:20,602 INFO wandb-AsyncioManager-main:240 [mailbox.py:close():155] Closing mailbox, abandoning 1 handles.
 
1
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_setup.py:_flush():81] Current SDK version is 0.24.2
2
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_setup.py:_flush():81] Configure stats pid to 6770
3
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_setup.py:_flush():81] Loading settings from environment variables
4
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /content/VibeVoice-finetuning/wandb/run-20260218_144236-a0h99ykt/logs/debug.log
5
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /content/VibeVoice-finetuning/wandb/run-20260218_144236-a0h99ykt/logs/debug-internal.log
6
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_init.py:init():844] calling init triggers
7
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_init.py:init():849] wandb.init called with sweep_config: {}
8
  config: {'_wandb': {}}
9
+ 2026-02-18 14:42:36,718 INFO MainThread:6770 [wandb_init.py:init():892] starting backend
10
+ 2026-02-18 14:42:37,379 INFO MainThread:6770 [wandb_init.py:init():895] sending inform_init request
11
+ 2026-02-18 14:42:37,388 INFO MainThread:6770 [wandb_init.py:init():903] backend started and connected
12
+ 2026-02-18 14:42:37,392 INFO MainThread:6770 [wandb_init.py:init():973] updated telemetry
13
+ 2026-02-18 14:42:37,418 INFO MainThread:6770 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout
14
+ 2026-02-18 14:42:38,009 INFO MainThread:6770 [wandb_init.py:init():1042] starting run threads in backend
15
+ 2026-02-18 14:42:39,044 INFO MainThread:6770 [wandb_run.py:_console_start():2529] atexit reg
16
+ 2026-02-18 14:42:39,044 INFO MainThread:6770 [wandb_run.py:_redirect():2377] redirect: wrap_raw
17
+ 2026-02-18 14:42:39,044 INFO MainThread:6770 [wandb_run.py:_redirect():2446] Wrapping output streams.
18
+ 2026-02-18 14:42:39,044 INFO MainThread:6770 [wandb_run.py:_redirect():2469] Redirects installed.
19
+ 2026-02-18 14:42:39,048 INFO MainThread:6770 [wandb_init.py:init():1082] run started, returning control to user process
20
+ 2026-02-18 14:42:39,050 INFO MainThread:6770 [wandb_run.py:_config_callback():1404] config_cb None None {'acoustic_tokenizer_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_acoustic_tokenizer', 'channels': 1, 'corpus_normalize': 0.0, 'causal': True, 'vae_dim': 64, 'fix_std': 0.5, 'std_dist_type': 'gaussian', 'conv_norm': 'none', 'pad_mode': 'constant', 'layernorm_eps': 1e-05, 'disable_last_norm': True, 'layernorm': 'RMSNorm', 'layernorm_elementwise_affine': True, 'conv_bias': True, 'layer_scale_init_value': 1e-06, 'weight_init_value': 0.01, 'mixer_layer': 'depthwise_conv', 'encoder_n_filters': 32, 'encoder_ratios': [8, 5, 5, 4, 2, 2], 'encoder_depths': '3-3-3-3-3-3-8', 'decoder_ratios': [8, 5, 5, 4, 2, 2], 'decoder_n_filters': 32, 'decoder_depths': None}, 'semantic_tokenizer_config': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 
'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_semantic_tokenizer', 'channels': 1, 'corpus_normalize': 0.0, 'causal': True, 'vae_dim': 128, 'fix_std': 0, 'std_dist_type': 'none', 'conv_norm': 'none', 'pad_mode': 'constant', 'layernorm_eps': 1e-05, 'disable_last_norm': True, 'layernorm': 'RMSNorm', 'layernorm_elementwise_affine': True, 'conv_bias': True, 'layer_scale_init_value': 1e-06, 'weight_init_value': 0.01, 'mixer_layer': 'depthwise_conv', 'encoder_n_filters': 32, 'encoder_ratios': [8, 5, 5, 4, 2, 2], 'encoder_depths': '3-3-3-3-3-3-8'}, 'decoder_config': {'vocab_size': 151936, 'max_position_embeddings': 65536, 'hidden_size': 1536, 'intermediate_size': 8960, 'num_hidden_layers': 28, 'num_attention_heads': 12, 'use_sliding_window': False, 'sliding_window': None, 'max_window_layers': 28, 'num_key_value_heads': 2, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-06, 'use_cache': True, 'rope_theta': 1000000.0, 'rope_scaling': None, 'attention_dropout': 0.0, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'qwen2'}, 'diffusion_head_config': {'hidden_size': 1536, 'head_layers': 4, 'head_ffn_ratio': 3.0, 'rms_norm_eps': 1e-05, 'latent_size': 64, 'speech_vae_dim': 64, 'prediction_type': 'v_prediction', 'diffusion_type': 'ddpm', 'ddpm_num_steps': 1000, 'ddpm_num_inference_steps': 20, 'ddpm_beta_schedule': 'cosine', 'ddpm_batch_mul': 4, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 
'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': '', '_attn_implementation_autoset': False, 'model_type': 'vibevoice_diffusion_head'}, 'acoustic_vae_dim': 64, 'semantic_vae_dim': 128, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['VibeVoiceForConditionalGeneration'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': None, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'microsoft/VibeVoice-1.5B', '_attn_implementation_autoset': True, 'transformers_version': '4.51.3', 'model_type': 'vibevoice', 'output_dir': '/content/', 'overwrite_output_dir': False, 'do_train': True, 'do_eval': False, 'do_predict': False, 'eval_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 10, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 0.6, 'num_train_epochs': 8.0, 'max_steps': -1, 'lr_scheduler_type': 'cosine', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.1, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/content/runs/Feb18_14-41-34_d690d73e974e', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 10, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 60, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 
'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 80.0, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/content/', 'disable_tqdm': False, 'remove_unused_columns': False, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'tp_size': 0, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False, 'average_tokens_across_devices': False, 'ddpm_batch_mul': 1, 'ce_loss_weight': 1.1, 'diffusion_loss_weight': 1.7, 'debug_ce_details': False, 'debug_ce_topk': 5, 'debug_ce_max_examples': 1, 'debug_ce_every_n_steps': 200, 'gradient_clipping': True, 'debug_save': False}
21
+ 2026-02-18 14:42:39,062 INFO MainThread:6770 [wandb_config.py:__setitem__():154] [no run ID] config set model/num_parameters = 2740951521 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7d301a5f4980>>
22
+ 2026-02-18 14:42:39,063 INFO MainThread:6770 [wandb_run.py:_config_callback():1404] config_cb model/num_parameters 2740951521 None
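
The config_cb dump in the new debug.log above records the full set of Hugging Face training arguments for this run. As a minimal sketch (standard transformers.TrainingArguments only; the custom fields such as ce_loss_weight=1.1 and diffusion_loss_weight=1.7 belong to the repo's own subclassed argument class, not stock TrainingArguments), the core hyperparameters correspond to:

from transformers import TrainingArguments

# Values copied from the 2026-02-18 run's config dump above.
args = TrainingArguments(
    output_dir="/content/",
    do_train=True,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=10,   # effective batch of 10 on a single device
    learning_rate=5e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=8,
    max_grad_norm=0.6,
    fp16=True,                        # fp16 mixed precision; bf16 disabled
    logging_steps=10,
    save_strategy="steps",
    save_steps=60,
    seed=42,
    report_to=["wandb"],
    remove_unused_columns=False,
)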
 
lor/.gitattributes ADDED
@@ -0,0 +1,67 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.avro filter=lfs diff=lfs merge=lfs -text
4
+ *.bin filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
7
+ *.ftz filter=lfs diff=lfs merge=lfs -text
8
+ *.gz filter=lfs diff=lfs merge=lfs -text
9
+ *.h5 filter=lfs diff=lfs merge=lfs -text
10
+ *.joblib filter=lfs diff=lfs merge=lfs -text
11
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
13
+ *.mds filter=lfs diff=lfs merge=lfs -text
14
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
+ *.model filter=lfs diff=lfs merge=lfs -text
16
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
17
+ *.npy filter=lfs diff=lfs merge=lfs -text
18
+ *.npz filter=lfs diff=lfs merge=lfs -text
19
+ *.onnx filter=lfs diff=lfs merge=lfs -text
20
+ *.ot filter=lfs diff=lfs merge=lfs -text
21
+ *.parquet filter=lfs diff=lfs merge=lfs -text
22
+ *.pb filter=lfs diff=lfs merge=lfs -text
23
+ *.pickle filter=lfs diff=lfs merge=lfs -text
24
+ *.pkl filter=lfs diff=lfs merge=lfs -text
25
+ *.pt filter=lfs diff=lfs merge=lfs -text
26
+ *.pth filter=lfs diff=lfs merge=lfs -text
27
+ *.rar filter=lfs diff=lfs merge=lfs -text
28
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
29
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
31
+ *.tar filter=lfs diff=lfs merge=lfs -text
32
+ *.tflite filter=lfs diff=lfs merge=lfs -text
33
+ *.tgz filter=lfs diff=lfs merge=lfs -text
34
+ *.wasm filter=lfs diff=lfs merge=lfs -text
35
+ *.xz filter=lfs diff=lfs merge=lfs -text
36
+ *.zip filter=lfs diff=lfs merge=lfs -text
37
+ *.zst filter=lfs diff=lfs merge=lfs -text
38
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
39
+ # Audio files - uncompressed
40
+ *.pcm filter=lfs diff=lfs merge=lfs -text
41
+ *.sam filter=lfs diff=lfs merge=lfs -text
42
+ *.raw filter=lfs diff=lfs merge=lfs -text
43
+ # Audio files - compressed
44
+ *.aac filter=lfs diff=lfs merge=lfs -text
45
+ *.flac filter=lfs diff=lfs merge=lfs -text
46
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
47
+ *.ogg filter=lfs diff=lfs merge=lfs -text
48
+ *.wav filter=lfs diff=lfs merge=lfs -text
49
+ # Image files - uncompressed
50
+ *.bmp filter=lfs diff=lfs merge=lfs -text
51
+ *.gif filter=lfs diff=lfs merge=lfs -text
52
+ *.png filter=lfs diff=lfs merge=lfs -text
53
+ *.tiff filter=lfs diff=lfs merge=lfs -text
54
+ # Image files - compressed
55
+ *.jpg filter=lfs diff=lfs merge=lfs -text
56
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
57
+ *.webp filter=lfs diff=lfs merge=lfs -text
58
+ # Video files - compressed
59
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
60
+ *.webm filter=lfs diff=lfs merge=lfs -text
61
+ VibeVoice-finetuning/checkpoint-2900/trainer_state.json filter=lfs diff=lfs merge=lfs -text
62
+ VibeVoice-finetuning/checkpoint-3600/trainer_state.json filter=lfs diff=lfs merge=lfs -text
63
+ VibeVoice-finetuning/wandb/run-20260218_142500-puguclmi/run-puguclmi.wandb filter=lfs diff=lfs merge=lfs -text
64
+ VibeVoice-finetuning/wandb/run-20260218_143617-09tsct60/run-09tsct60.wandb filter=lfs diff=lfs merge=lfs -text
65
+ VibeVoice-finetuning/wandb/run-20260218_144236-a0h99ykt/run-a0h99ykt.wandb filter=lfs diff=lfs merge=lfs -text
66
+ VibeVoice-finetuning/wandb/run-20260218_174129-ppslurye/files/output.log filter=lfs diff=lfs merge=lfs -text
67
+ VibeVoice-finetuning/wandb/run-20260218_174129-ppslurye/run-ppslurye.wandb filter=lfs diff=lfs merge=lfs -text
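
The rules above route every large binary (checkpoints, audio, images, plus a few explicit wandb paths) through Git LFS. A gitattributes pattern without a slash matches the file's basename at any depth; a simplified standard-library sketch of that matching (hypothetical helper, covering only a subset of the extension rules above):

from fnmatch import fnmatch

LFS_PATTERNS = ["*.bin", "*.safetensors", "*.pt", "*.pth", "*.wav"]  # subset of the rules above

def is_lfs_tracked(path: str) -> bool:
    # Slash-free gitattributes patterns are matched against the basename.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

assert is_lfs_tracked("VibeVoice-finetuning/checkpoint-3600/lora/adapter_model.safetensors")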
lor/VibeVoice-finetuning/checkpoint-3600/lora/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: ''
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.7.1
lor/VibeVoice-finetuning/checkpoint-3600/lora/acoustic_connector/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b88dadd781938d55285c2239be35bbdd41c7c2e0f6b783c2d6a0f1b99505ba4
3
+ size 4927259
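
The three lines above are the entire on-disk content of an LFS-tracked file: a pointer recording the spec version, the SHA-256 of the real blob, and its size in bytes (about 4.9 MB here). The same format applies to every pointer file in this commit, and is simple enough to parse directly; a small sketch:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; the oid field is "sha256:<hex digest>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "oid": digest, "size": int(fields["size"])}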
lor/VibeVoice-finetuning/checkpoint-3600/lora/adapter_config.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layers_pattern": null,
10
+ "layers_to_transform": null,
11
+ "loftq_config": {},
12
+ "lora_alpha": 128,
13
+ "lora_dropout": 0.05,
14
+ "megatron_config": null,
15
+ "megatron_core": "megatron.core",
16
+ "modules_to_save": null,
17
+ "peft_type": "LORA",
18
+ "r": 64,
19
+ "rank_pattern": {},
20
+ "revision": null,
21
+ "target_modules": [
22
+ "o_proj",
23
+ "v_proj",
24
+ "gate_proj",
25
+ "k_proj",
26
+ "down_proj",
27
+ "q_proj",
28
+ "up_proj"
29
+ ],
30
+ "task_type": "CAUSAL_LM"
31
+ }
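
This adapter config pins rank-64 LoRA with alpha 128 (an effective scaling of alpha/r = 2.0) on all attention and MLP projections of the Qwen2 decoder. A minimal sketch of the equivalent PEFT setup (the base language model itself has to come from the repo's own VibeVoice loaders, since base_model_name_or_path is empty here):

from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=64,
    lora_alpha=128,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
# model = get_peft_model(base_language_model, lora_config)  # base model loading is repo-specific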
lor/VibeVoice-finetuning/checkpoint-3600/lora/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1a82671ca5f79aa3997af0e98a18686d60ea239fc8621083515be3b4c247377
3
+ size 295486584
lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/config.json ADDED
@@ -0,0 +1,20 @@
1
+ {
2
+ "architectures": [
3
+ "VibeVoiceDiffusionHead"
4
+ ],
5
+ "ddpm_batch_mul": 4,
6
+ "ddpm_beta_schedule": "cosine",
7
+ "ddpm_num_inference_steps": 20,
8
+ "ddpm_num_steps": 1000,
9
+ "diffusion_type": "ddpm",
10
+ "head_ffn_ratio": 3.0,
11
+ "head_layers": 4,
12
+ "hidden_size": 1536,
13
+ "latent_size": 64,
14
+ "model_type": "vibevoice_diffusion_head",
15
+ "prediction_type": "v_prediction",
16
+ "rms_norm_eps": 1e-05,
17
+ "speech_vae_dim": 64,
18
+ "torch_dtype": "float32",
19
+ "transformers_version": "4.51.3"
20
+ }
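
The diffusion head is stored full-precision (torch_dtype float32) alongside the LoRA adapter, as both a .bin and a .safetensors file below, so it can be reattached independently of the language-model weights. A minimal sketch of inspecting the safetensors copy (the head class itself, VibeVoiceDiffusionHead, lives in the repo's modular code):

from safetensors.torch import load_file

# hidden_size=1536, 4 head layers, v_prediction DDPM per the config above.
state_dict = load_file("checkpoint-3600/lora/diffusion_head/model.safetensors")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))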
lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/diffusion_head_full.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79dea97375ca53510d2bd5e54de769ab2a3e6790a133d1522b1c8345c6da7926
3
+ size 493128917
lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head/model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca4e00d13878abba7408587b100d14678b8f4b9408a47af3ee81e1e57bc792c4
3
+ size 493120120
lor/VibeVoice-finetuning/checkpoint-3600/lora/diffusion_head_full.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79dea97375ca53510d2bd5e54de769ab2a3e6790a133d1522b1c8345c6da7926
3
+ size 493128917
lor/VibeVoice-finetuning/checkpoint-3600/lora/semantic_connector/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abf1e11c63eab8288c5fb88c1dff0f4a771ae717fcc318a261bde35af562a653
3
+ size 5123867
lor/VibeVoice-finetuning/checkpoint-3600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0eb993e6b90063537f565efd745aec18c47ae481d009668ee0088671edf0c982
3
+ size 1577460553
lor/VibeVoice-finetuning/checkpoint-3600/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd7e0ada6dfb700d5bec74f5f5dab751c4b3a1982517ac374735d3b15512b866
3
+ size 1297
lor/VibeVoice-finetuning/checkpoint-3600/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d25a7c7d1067e773f6d82591af38a527c985cbf77cf6fa1669c5e0f34639696a
3
+ size 14645
lor/VibeVoice-finetuning/checkpoint-3600/scaler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0de07336841d438b92ac2af437c756d5b1eee4a3bd0fc209eb2ee92c1bfa99d5
3
+ size 1383
lor/VibeVoice-finetuning/checkpoint-3600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d57670db4371b577ca4e2f4f0b7c1d8f62a839b54700a8fd81b62af2cf43b53
3
+ size 1465
lor/VibeVoice-finetuning/checkpoint-3600/trainer_state.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5232a642db5aa76ada77dd14ba3d448982b076eff22046f389e0c9dab47cff5a
3
+ size 17479811
preprocessed/.gitattributes ADDED
@@ -0,0 +1,60 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.avro filter=lfs diff=lfs merge=lfs -text
4
+ *.bin filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
7
+ *.ftz filter=lfs diff=lfs merge=lfs -text
8
+ *.gz filter=lfs diff=lfs merge=lfs -text
9
+ *.h5 filter=lfs diff=lfs merge=lfs -text
10
+ *.joblib filter=lfs diff=lfs merge=lfs -text
11
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
13
+ *.mds filter=lfs diff=lfs merge=lfs -text
14
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
+ *.model filter=lfs diff=lfs merge=lfs -text
16
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
17
+ *.npy filter=lfs diff=lfs merge=lfs -text
18
+ *.npz filter=lfs diff=lfs merge=lfs -text
19
+ *.onnx filter=lfs diff=lfs merge=lfs -text
20
+ *.ot filter=lfs diff=lfs merge=lfs -text
21
+ *.parquet filter=lfs diff=lfs merge=lfs -text
22
+ *.pb filter=lfs diff=lfs merge=lfs -text
23
+ *.pickle filter=lfs diff=lfs merge=lfs -text
24
+ *.pkl filter=lfs diff=lfs merge=lfs -text
25
+ *.pt filter=lfs diff=lfs merge=lfs -text
26
+ *.pth filter=lfs diff=lfs merge=lfs -text
27
+ *.rar filter=lfs diff=lfs merge=lfs -text
28
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
29
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
31
+ *.tar filter=lfs diff=lfs merge=lfs -text
32
+ *.tflite filter=lfs diff=lfs merge=lfs -text
33
+ *.tgz filter=lfs diff=lfs merge=lfs -text
34
+ *.wasm filter=lfs diff=lfs merge=lfs -text
35
+ *.xz filter=lfs diff=lfs merge=lfs -text
36
+ *.zip filter=lfs diff=lfs merge=lfs -text
37
+ *.zst filter=lfs diff=lfs merge=lfs -text
38
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
39
+ # Audio files - uncompressed
40
+ *.pcm filter=lfs diff=lfs merge=lfs -text
41
+ *.sam filter=lfs diff=lfs merge=lfs -text
42
+ *.raw filter=lfs diff=lfs merge=lfs -text
43
+ # Audio files - compressed
44
+ *.aac filter=lfs diff=lfs merge=lfs -text
45
+ *.flac filter=lfs diff=lfs merge=lfs -text
46
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
47
+ *.ogg filter=lfs diff=lfs merge=lfs -text
48
+ *.wav filter=lfs diff=lfs merge=lfs -text
49
+ # Image files - uncompressed
50
+ *.bmp filter=lfs diff=lfs merge=lfs -text
51
+ *.gif filter=lfs diff=lfs merge=lfs -text
52
+ *.png filter=lfs diff=lfs merge=lfs -text
53
+ *.tiff filter=lfs diff=lfs merge=lfs -text
54
+ # Image files - compressed
55
+ *.jpg filter=lfs diff=lfs merge=lfs -text
56
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
57
+ *.webp filter=lfs diff=lfs merge=lfs -text
58
+ # Video files - compressed
59
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
60
+ *.webm filter=lfs diff=lfs merge=lfs -text
preprocessed/preprocessed_batches.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cdc94373adbe2432d986c30e1a9c212246bdef25416046977ea89a24655ccb9
3
+ size 5094562383
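
The ~5.1 GB blob behind the pointer above appears to cache the repo's preprocessing output so training runs can skip the audio/text preprocessing pass. Its exact structure is whatever the preprocessing script serialized (assumed here to be a Python container of tensors); a minimal loading sketch:

import torch

# A pickled container needs weights_only=False on newer PyTorch versions;
# only do this for files you trust.
batches = torch.load("preprocessed/preprocessed_batches.pt",
                     map_location="cpu", weights_only=False)
print(type(batches))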