# train_vibevoice_lora.py
import os
import psutil
import gc
import math
import logging
import copy
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Sampler, DataLoader
from datasets import load_dataset, DatasetDict, VerificationMode
from transformers import (
    HfArgumentParser,
    Trainer,
    set_seed,
    TrainerCallback,
)
from transformers import TrainingArguments as HfTrainingArguments
from peft import LoraConfig, get_peft_model, TaskType
from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator
# Environment settings
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = logging.getLogger(__name__)
# ================== DISK-ONLY DATASET & SAMPLER LOGIC ==================
class DiskBatchDataset:
    """
    Disk-only dataset:
    - Always splits the dataset into chunks.
    - Deletes the original file after chunking.
    - Keeps only one chunk in RAM at any time.
    """
    def __init__(self, file_path: str, num_chunks: int = 4):
        self.file_path = file_path
        self.chunks_paths = []
        self.chunk_lengths = []
        self.chunk_boundaries = [0]
        self.current_chunk_idx = -1
        self.current_chunk_data = None
        # Log available RAM (informational only)
        try:
            total_ram = psutil.virtual_memory().total / (1024 ** 3)
            logger.info(f"System RAM: {total_ram:.2f} GB (Running in Disk-Only Mode)")
        except Exception:
            pass
        self._init_disk_mode(num_chunks)
    def _init_disk_mode(self, num_chunks):
        base_dir = os.path.dirname(self.file_path)
        base_name = os.path.splitext(os.path.basename(self.file_path))[0]
        # Check whether the chunk files already exist
        chunks_ready = all(
            os.path.exists(os.path.join(base_dir, f"{base_name}_part_{i}.pt"))
            for i in range(num_chunks)
        )
        if not chunks_ready:
            if not os.path.exists(self.file_path):
                raise FileNotFoundError(f"Neither chunks nor original file found at {self.file_path}")
            logger.info(f"Processing dataset: Splitting into {num_chunks} parts...")
            full_data = torch.load(self.file_path, map_location='cpu')
            total_items = len(full_data)
            chunk_size = math.ceil(total_items / num_chunks)
            for i in range(num_chunks):
                start = i * chunk_size
                end = min(start + chunk_size, total_items)
                chunk_data = full_data[start:end]
                path = os.path.join(base_dir, f"{base_name}_part_{i}.pt")
                torch.save(chunk_data, path)
                self.chunks_paths.append(path)
                self.chunk_lengths.append(len(chunk_data))
                logger.info(f"Saved chunk {i + 1}/{num_chunks} ({len(chunk_data)} items)")
                del chunk_data
                gc.collect()
            del full_data
            gc.collect()
            # === Delete the original file ===
            logger.info(f"Deleting original large file to save space: {self.file_path}")
            try:
                os.remove(self.file_path)
                logger.info("Original file deleted successfully.")
            except Exception as e:
                logger.warning(f"Could not delete original file: {e}")
            # ================================
        else:
            logger.info("Chunks found on disk. Skipping split.")
            for i in range(num_chunks):
                path = os.path.join(base_dir, f"{base_name}_part_{i}.pt")
                self.chunks_paths.append(path)
                # Quick load just to record the chunk length
                temp = torch.load(path, map_location='cpu')
                self.chunk_lengths.append(len(temp))
                del temp
                gc.collect()
        self.total_length = sum(self.chunk_lengths)
        for length in self.chunk_lengths:
            self.chunk_boundaries.append(self.chunk_boundaries[-1] + length)
    def _load_chunk(self, chunk_idx):
        """Load the requested chunk and drop the previous one."""
        if self.current_chunk_idx == chunk_idx:
            return
        # Free memory held by the previous chunk
        self.current_chunk_data = None
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # also release cached GPU memory if any
        # Load the new chunk from disk
        self.current_chunk_data = torch.load(self.chunks_paths[chunk_idx], map_location='cpu')
        self.current_chunk_idx = chunk_idx
    def __len__(self):
        return self.total_length

    def __getitem__(self, idx):
        # Find which chunk this index belongs to
        chunk_idx = 0
        for i, boundary in enumerate(self.chunk_boundaries[1:]):
            if idx < boundary:
                chunk_idx = i
                break
        self._load_chunk(chunk_idx)
        local_idx = idx - self.chunk_boundaries[chunk_idx]
        item = self.current_chunk_data[local_idx]
        # Shallow-copy the dict so callers cannot mutate the cached chunk entry
        result = {}
        for k, v in item.items():
            result[k] = v
        return result
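# Design note: __getitem__ above walks chunk_boundaries linearly, which is fine
# for a handful of chunks. For many chunks, a binary search over the boundary
# list is the equivalent O(log n) lookup; a minimal sketch (illustrative only,
# not used by DiskBatchDataset):
def _find_chunk_bisect(boundaries: List[int], idx: int) -> int:
    import bisect
    # boundaries = [0, n0, n0 + n1, ...]; returns the chunk containing idx
    return bisect.bisect_right(boundaries, idx) - 1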
class ChunkAwareSampler(Sampler):
    """
    Disk-friendly sampler:
    Requests data chunk by chunk so the disk never has to seek across chunks.
    First chunk 1 (shuffled), then chunk 2 (shuffled), and so on.
    """
    def __init__(self, dataset: DiskBatchDataset, generator=None):
        self.dataset = dataset
        self.generator = generator

    def __iter__(self):
        # Chunk-wise shuffle: randomize order only within each chunk
        all_indices = []
        for i in range(len(self.dataset.chunks_paths)):
            start = self.dataset.chunk_boundaries[i]
            end = self.dataset.chunk_boundaries[i + 1]
            length = end - start
            # Random permutation of indices local to this chunk
            perm = torch.randperm(length, generator=self.generator).tolist()
            chunk_indices = [p + start for p in perm]
            all_indices.extend(chunk_indices)
        return iter(all_indices)

    def __len__(self):
        return len(self.dataset)
class SmartBatchCollator:
    """Collator for pre-batched items: concatenates tensors along dim 0."""
    def __call__(self, batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        if not batch:
            return {}
        result = {}
        keys = batch[0].keys()
        for key in keys:
            items = [b[key] for b in batch if b[key] is not None]
            if not items:
                result[key] = None
                continue
            if isinstance(items[0], torch.Tensor):
                result[key] = torch.cat(items, dim=0)
            else:
                # Non-tensor metadata: keep the first occurrence
                result[key] = items[0]
        return result
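# Minimal usage sketch for the disk-only pipeline above. The path and
# num_chunks are illustrative assumptions; this helper is never called by the
# training flow and only documents how the three pieces compose.
def _demo_disk_pipeline(path: str = "output/preprocessed/preprocessed_batches.pt"):
    ds = DiskBatchDataset(path, num_chunks=4)
    loader = DataLoader(
        ds,
        batch_size=1,
        sampler=ChunkAwareSampler(ds),    # chunk-local order keeps disk reads sequential
        collate_fn=SmartBatchCollator(),  # concatenates pre-batched tensors along dim 0
        num_workers=0,                    # extra workers would each re-load chunks into RAM
    )
    return next(iter(loader))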
# ================== EMA CALLBACK ==================
class EmaCallback(TrainerCallback):
    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cuda"):
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None
        self._orig = None

    def _get_module(self, model):
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    def _swap_in_ema(self, model):
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        self._swap_in_ema(model)

    # Note: stock transformers only dispatches on_evaluate/on_save; the *_end
    # hooks below fire only if the Trainer (or a fork) emits those events.
    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        self._swap_in_ema(model)
# ================== ARGUMENTS ==================
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default=None)
    processor_name_or_path: Optional[str] = field(default=None)
    cache_dir: Optional[str] = field(default=None)
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj"
    )
    lora_wrap_diffusion_head: bool = field(default=False)
    train_diffusion_head: bool = field(default=False)
    train_connectors: bool = field(default=False)
    layers_to_freeze: Optional[str] = field(default=None)


@dataclass
class DataArguments:
    dataset_name: Optional[str] = field(default=None)
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None)
    validation_jsonl: Optional[str] = field(default=None)
    voice_prompt_drop_rate: float = field(default=0.0)


@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    ddpm_batch_mul: int = field(default=1)
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(default=False)
    debug_save: bool = field(default=False)
def build_lora_config(args: ModelArguments) -> LoraConfig:
    target_modules = [s.strip() for s in args.lora_target_modules.split(",") if s.strip()]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=target_modules,
    )


def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    target_modules = ["noisy_images_proj", "cond_proj", "gate_proj", "up_proj", "down_proj", "linear"]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=target_modules,
    )


def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    # Next-token prediction: targets are the inputs shifted left by one.
    shifted = labels[:, 1:].contiguous()
    base_mask = (
        attention_mask[:, 1:].contiguous().eq(1)
        if (attention_mask is not None and attention_mask.numel() > 0)
        else torch.ones_like(shifted, dtype=torch.bool)
    )
    # Acoustic positions are trained by the diffusion head, not by CE.
    label_is_acoustic = acoustic_input_mask[:, 1:].contiguous()
    final_mask = base_mask & (~label_is_acoustic)
    out = shifted.clone()
    out[~final_mask] = pad_id
    return out
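# Worked example for mask_for_ce (illustrative). With next-token shifting,
# position t is trained to predict token t + 1, and any shifted target that
# falls on an acoustic position is set to -100 so CrossEntropyLoss skips it.
def _mask_for_ce_example() -> torch.Tensor:
    labels = torch.tensor([[10, 11, 12, 13]])
    attention_mask = torch.ones_like(labels)
    acoustic = torch.tensor([[False, False, True, False]])
    # shifted targets are [11, 12, 13]; the middle one is acoustic -> masked
    return mask_for_ce(labels, attention_mask, acoustic)  # tensor([[11, -100, 13]])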
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
    try:
        acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
        if acoustic is None or not hasattr(acoustic, "encode"):
            return
        base_encode = acoustic.encode

        def encode_wrapped(*args, **kwargs):
            out = base_encode(*args, **kwargs)
            # If the output already supports out[0][0], pass it through unchanged.
            try:
                _ = out[0][0]
                return out
            except Exception:
                pass
            # Otherwise wrap it so legacy call sites can index out[0][0].
            if isinstance(out, dict):
                for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
                    if k in out:
                        return [[out[k]]]
                if len(out) > 0:
                    return [[next(iter(out.values()))]]
            return [[out]]

        acoustic.encode = encode_wrapped
        logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
    except Exception as e:
        logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")
# ================== MAIN FUNCTION ==================
def main() -> None:
    parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.info("Training parameters %s", training_args)
    set_seed(training_args.seed)
    if not getattr(training_args, "gradient_clipping", False):
        if hasattr(training_args, "max_grad_norm"):
            training_args.max_grad_norm = 0.0
    else:
        if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
            training_args.max_grad_norm = 1.0
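    # Note (assumption about HF internals): Trainer only applies clipping when
    # max_grad_norm is a positive number, so setting it to 0.0 above is the
    # usual way to disable clipping without touching the training loop.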
    # Load Processor
    processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
    if processor_path is None:
        raise ValueError("--model_name_or_path must be provided")
    processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)
    tok = processor.tokenizer
    for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
        if not hasattr(tok, required) or getattr(tok, required) is None:
            raise RuntimeError(f"Tokenizer missing required special id: {required}")
    # Load Model
    dtype = torch.float32
    if training_args.bf16:
        dtype = torch.bfloat16
    elif getattr(training_args, "fp16", False):
        dtype = torch.float16
    model = VibeVoiceForConditionalGeneration.from_pretrained(
        model_args.model_name_or_path,
        torch_dtype=dtype,
        device_map={"": 0},  # place the whole model on GPU 0
    )
    _patch_acoustic_encode_for_legacy_indexing(model, logger)
    processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)
    # Hard-tie LM head
    try:
        emb_module = model.get_input_embeddings()
        head_module = model.get_output_embeddings()
        if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
            if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
                with torch.no_grad():
                    head_module.weight = emb_module.weight
                logger.info("Force-tied LM head weight to input embeddings.")
    except Exception as e:
        logger.warning(f"Force-tie of LM head failed: {e}")
    # Configure Model Params
    if hasattr(model.config, "use_cache") and training_args.do_train:
        model.config.use_cache = False
    # Freeze Tokenizers
    if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
        for p in model.model.acoustic_tokenizer.parameters():
            p.requires_grad = False
    if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
        for p in model.model.semantic_tokenizer.parameters():
            p.requires_grad = False
    # LoRA Wrapping (LLM)
    lora_cfg = build_lora_config(model_args)
    tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
    skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable") for t in tm_lower)
    if not skip_lm_lora:
        model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
        try:
            model.tie_weights()
        except Exception:
            pass
    # Freeze all initially
    for _, p in model.named_parameters():
        p.requires_grad = False
    # Enable LoRA
    try:
        for n, p in model.model.language_model.named_parameters():
            if "lora_A" in n or "lora_B" in n:
                p.requires_grad = True
    except Exception:
        logger.warning("Could not re-enable LoRA params on language_model.")
    # Diffusion Head LoRA or Full Train
    if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        class _HeadForwardShim(nn.Module):
            def __init__(self, base: nn.Module):
                super().__init__()
                self.base = base

            def forward(self, *args, **kwargs):
                if len(args) >= 3:
                    noisy_images, timesteps, condition = args[:3]
                else:
                    noisy_images = kwargs.get("noisy_images")
                    timesteps = kwargs.get("timesteps")
                    condition = kwargs.get("condition")
                return self.base(noisy_images, timesteps, condition)

        try:
            shim = _HeadForwardShim(model.model.prediction_head)
            model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
            for n, p in model.model.prediction_head.named_parameters():
                if "lora_A" in n or "lora_B" in n:
                    p.requires_grad = True
        except Exception as e:
            logger.warning(f"Could not LoRA-wrap diffusion head: {e}")
    if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        for p in model.model.prediction_head.parameters():
            p.requires_grad = True
    if getattr(model_args, "train_connectors", False):
        if hasattr(model.model, "acoustic_connector"):
            for p in model.model.acoustic_connector.parameters():
                p.requires_grad = True
        if hasattr(model.model, "semantic_connector"):
            for p in model.model.semantic_connector.parameters():
                p.requires_grad = True
    # Freeze embedding
    try:
        emb = model.get_input_embeddings()
        if hasattr(emb, "weight"):
            emb.weight.requires_grad_(False)
        head = model.get_output_embeddings()
        if head is not None and hasattr(head, "weight"):
            head.weight.requires_grad_(False)
    except Exception:
        pass
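    # When the LM head was successfully hard-tied to the input embeddings
    # earlier, the two weights share storage, so freezing either one freezes
    # both; the second requires_grad_(False) call is just defensive.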
    # ================== DATASET LOADING ==================
    preprocessed_dir = os.path.join(training_args.output_dir, "preprocessed")
    preprocessed_file = os.path.join(preprocessed_dir, "preprocessed_batches.pt")
    if os.path.exists(preprocessed_file):
        logger.info(f"Loading preprocessed data from {preprocessed_file}")
        # DiskBatchDataset splits the original file into 4 parts and then
        # deletes the original to save disk space.
        train_dataset = DiskBatchDataset(
            preprocessed_file,
            num_chunks=4,
        )
        eval_dataset = None  # evaluation is skipped in disk-only mode
        data_collator = SmartBatchCollator()
    else:
        logger.info("Preprocessed data not found. Loading from raw sources.")
        verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
        if data_args.train_jsonl is not None:
            data_files = {"train": data_args.train_jsonl}
            if data_args.validation_jsonl:
                data_files["validation"] = data_args.validation_jsonl
            raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
        else:
            if data_args.dataset_name is None:
                raise ValueError("Provide --dataset_name or --train_jsonl.")
            raw = load_dataset(data_args.dataset_name, data_args.dataset_config_name, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
        train_ds = raw[data_args.train_split_name]
        eval_ds = None
        if training_args.do_eval:
            if data_args.eval_split_name in raw:
                eval_ds = raw[data_args.eval_split_name]
            elif data_args.eval_split_size > 0:
                split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
                train_ds, eval_ds = split["train"], split["test"]
        train_dataset = VibeVoiceDataset(
            train_ds,
            text_column=data_args.text_column_name,
            audio_column=data_args.audio_column_name,
            voice_prompts_column=data_args.voice_prompts_column_name,
        )
        eval_dataset = VibeVoiceDataset(
            eval_ds,
            text_column=data_args.text_column_name,
            audio_column=data_args.audio_column_name,
            voice_prompts_column=data_args.voice_prompts_column_name,
        ) if eval_ds else None
        speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)
        semantic_dim = getattr(model.config, "semantic_vae_dim", 128)
        compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None
        data_collator = VibeVoiceCollator(
            processor=processor,
            max_length=data_args.max_length,
            speech_compress_ratio=speech_compress_ratio,
            semantic_vae_dim=semantic_dim,
            compute_semantics=compute_semantics_flag,
            voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
        )
    # ================== TRAINER ==================
    class VibeVoiceTrainer(Trainer):
        def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
            labels = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask")
            acoustic_input_mask = inputs.get("acoustic_input_mask")
            sem = inputs.get("speech_semantic_tensors", None)
            try:
                target_dtype = next(model.model.semantic_connector.parameters()).dtype
            except Exception:
                target_dtype = model.get_input_embeddings().weight.dtype
            if sem is None:
                sm = inputs.get("speech_masks")
                if sm is not None:
                    zeros = torch.zeros(sm.size(0), sm.size(1), getattr(model.config, "semantic_vae_dim", 128), dtype=target_dtype, device=sm.device)
                    inputs["speech_semantic_tensors"] = zeros
            elif isinstance(sem, torch.Tensor):
                inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)
            outputs = model(
                input_ids=inputs.get("input_ids"),
                attention_mask=attention_mask,
                speech_tensors=inputs.get("speech_tensors"),
                speech_masks=inputs.get("speech_masks"),
                speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
                acoustic_input_mask=acoustic_input_mask,
                acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
                speeches_loss_input=inputs.get("speeches_loss_input"),
                ddpm_batch_mul=training_args.ddpm_batch_mul,
            )
            logits = outputs.logits
            # CE over text positions only; acoustic targets are covered by the diffusion loss.
            ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
            shift_logits = logits[:, :-1, :].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))
            diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
            total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss
            try:
                prefix = "train" if model.training else "eval"
                self.log({f"{prefix}/ce_loss": ce_loss.detach().item(), f"{prefix}/diffusion_loss": diffusion_loss.detach().item()})
            except Exception:
                pass
            return (total, outputs) if return_outputs else total

        def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
            target_dir = output_dir or self.args.output_dir
            lora_out = os.path.join(target_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)
            lm = getattr(self.model.model, "language_model", None)
            if hasattr(lm, "save_pretrained"):
                lm.save_pretrained(lora_out)
            pred_head = getattr(self.model.model, "prediction_head", None)
            if pred_head is not None:
                if hasattr(pred_head, "save_pretrained"):
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    pred_head.save_pretrained(ph_dir)
                if hasattr(pred_head, "state_dict"):
                    sd = pred_head.state_dict()
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
            for comp_name in ["acoustic_connector", "semantic_connector"]:
                comp = getattr(self.model.model, comp_name, None)
                if comp is not None:
                    d = os.path.join(lora_out, comp_name)
                    os.makedirs(d, exist_ok=True)
                    torch.save(comp.state_dict(), os.path.join(d, "pytorch_model.bin"))

        # Override the dataloader to use the disk-aware sampler
        def get_train_dataloader(self) -> DataLoader:
            if self.train_dataset is None:
                raise ValueError("Trainer: training requires a train_dataset.")
            train_dataset = self.train_dataset
            if isinstance(train_dataset, DiskBatchDataset):
                # Force ChunkAwareSampler to avoid disk thrashing
                sampler = ChunkAwareSampler(train_dataset)
                return DataLoader(
                    train_dataset,
                    batch_size=self._train_batch_size,
                    sampler=sampler,
                    collate_fn=self.data_collator,
                    drop_last=self.args.dataloader_drop_last,
                    num_workers=0,  # must be 0 in disk-only mode (workers would duplicate chunk loads)
                    pin_memory=self.args.dataloader_pin_memory,
                )
            else:
                return super().get_train_dataloader()
    ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cuda")
    if getattr(training_args, 'fp16', False) or getattr(training_args, 'bf16', False):
        logger.info('>>> Enforcing float32 for trainable parameters to fix GradScaler.')
        for name, param in model.named_parameters():
            if param.requires_grad:
                param.data = param.data.to(torch.float32)
    trainer = VibeVoiceTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        callbacks=[ema_cb],
    )
    if getattr(training_args, "gradient_checkpointing", False):
        model.gradient_checkpointing_enable()
    if training_args.do_train:
        trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
        trainer.save_model()
if __name__ == "__main__":
    main()