import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import os
import glob
import wandb
from datasets import load_dataset
from accelerate import Accelerator
import argparse

from src.rwkv7 import RWKV7
from src.dataset import MyDataset
from src.transformer import TransformerModel

class L2Wrap(torch.autograd.Function):
    """Pass the loss through unchanged in forward; in backward, add a small
    gradient that pushes the largest logit at every position toward zero so the
    logits do not grow without bound (the trick used in RWKV-LM training)."""

    @staticmethod
    def forward(ctx, loss, y):
        ctx.save_for_backward(y)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        y = ctx.saved_tensors[0]
        # Scale the penalty by 1e-4 / (batch_size * sequence_length).
        factor = 1e-4 / (y.shape[0] * y.shape[1])
        maxx, ids = torch.max(y, -1, keepdim=True)
        gy = torch.zeros_like(y)
        gy.scatter_(-1, ids, maxx * factor)
        return grad_output, gy
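
# Rough illustration (assuming logits of shape [batch, time, vocab]): for y of
# shape [B, T, V], only the arg-max logit at each of the B*T positions receives
# an extra gradient of max_logit * 1e-4 / (B * T); all other entries are left to
# the ordinary cross-entropy gradient that flows through `loss`.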

def load_latest_checkpoint(model, checkpoint_dir):
    """
    Load the most recent checkpoint from the specified directory into the model.

    Args:
        model: The model to load the checkpoint into
        checkpoint_dir: Directory containing checkpoint files (.pt)
    """
    if not os.path.isdir(checkpoint_dir):
        print("No checkpoint directory found; training from scratch.")
        return
    checkpoint_files = [f for f in os.listdir(checkpoint_dir) if f.endswith('.pt')]
    if not checkpoint_files:
        print("No checkpoint files found in the directory.")
        return
    latest_checkpoint = max(checkpoint_files, key=lambda x: os.path.getctime(os.path.join(checkpoint_dir, x)))
    checkpoint_path = os.path.join(checkpoint_dir, latest_checkpoint)
    model.load_state_dict(torch.load(checkpoint_path))
    print(f"Loaded checkpoint: {checkpoint_path}")
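
# Note: "latest" is decided by file creation time. save_checkpoint() below keeps
# only a single .pt file per run, and only model weights are stored (no optimizer
# state), so a resumed run restarts the optimizer from scratch.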

def initialize_model(checkpoint_dir, dim, n_blocks):
    """
    Initialize the RWKV7 model and load the latest checkpoint.

    Args:
        checkpoint_dir: Directory containing checkpoint files
        dim: Dimension of the model
        n_blocks: Number of blocks in the model

    Returns:
        The initialized model
    """
    model = RWKV7(text_vocab=128, audio_vocab=8192 + 1, dim=dim, n_blocks=n_blocks).cuda()
    # Alternative Transformer backbone with the same interface:
    # model = TransformerModel(text_vocab=128, audio_vocab=8192 + 1, dim=dim, n_blocks=n_blocks).cuda()

    load_latest_checkpoint(model, checkpoint_dir)

    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model total parameters: {total_params}")
    print(f"Model trainable parameters: {trainable_params}")

    return model
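
# The audio vocabulary is 8192 tokens plus one extra id (8192), which is the id
# collate_fn below uses as its padding token.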

def collate_fn(batch):
    """
    Custom collate function for DataLoader to handle variable-length sequences.

    Args:
        batch: A batch of token-id sequences

    Returns:
        Tuple of (input_ids, targets, loss_masks)
    """
    padding_token = 8192
    max_length = max(len(seq) for seq in batch) - 1

    input_ids = []
    targets = []
    loss_masks = []

    for seq in batch:
        # Next-token prediction: inputs are seq[:-1] and targets are seq[1:].
        input_seq = list(seq[:-1])
        target_seq = list(seq[1:])
        input_padding = [padding_token] * (max_length - len(input_seq))
        target_padding = [padding_token] * (max_length - len(target_seq))
        mask_padding = [0] * (max_length - len(input_seq))

        input_ids.append(torch.tensor(input_seq + input_padding, dtype=torch.long))
        targets.append(torch.tensor(target_seq + target_padding, dtype=torch.long))
        # 1 over real tokens, 0 over padding, so padded positions are excluded from the loss.
        loss_masks.append(torch.tensor([1] * len(input_seq) + mask_padding, dtype=torch.long))

    return torch.stack(input_ids, dim=0), torch.stack(targets, dim=0), torch.stack(loss_masks, dim=0)
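
# Worked example with made-up token ids: collate_fn([[5, 6, 7, 8], [5, 6]]) gives
#   input_ids  = [[5, 6, 7], [5, 8192, 8192]]
#   targets    = [[6, 7, 8], [6, 8192, 8192]]
#   loss_masks = [[1, 1, 1], [1, 0,    0   ]]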

def prepare_dataloader(batch_size):
    """
    Prepare the dataset and dataloader.

    Args:
        batch_size: Batch size for training

    Returns:
        DataLoader for training
    """
    dataset = load_dataset("./JLSpeech_tokenized")['train']
    dataset = MyDataset(hf_dataset=dataset, train_type='pretrain')

    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn
    )

    return dataloader
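
# Assumption: each item yielded by MyDataset is a single flat sequence of token
# ids; collate_fn relies on this when it builds the shifted input/target pairs.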

def train(model, dataloader, num_epochs, output_dir, learning_rate):
    """
    Train the model.

    Args:
        model: The model to train
        dataloader: DataLoader for training data
        num_epochs: Number of training epochs
        output_dir: Directory to save checkpoints
        learning_rate: Learning rate for optimizer
    """
    accelerator = Accelerator()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
    model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

    wandb.init(project="TTS")

    os.makedirs(output_dir, exist_ok=True)

    criterion = torch.nn.CrossEntropyLoss(reduction='none')

    model.train()
    for epoch in tqdm(range(num_epochs)):
        for batch in tqdm(dataloader, leave=False):
            input_ids, targets, loss_masks = batch
            input_ids = input_ids.long().to(accelerator.device)
            targets = targets.long().to(accelerator.device)
            loss_masks = loss_masks.to(accelerator.device)

            outputs = model(None, None, input_ids)

            # Per-token cross entropy, masked so padded positions do not contribute.
            loss = criterion(outputs.view(-1, outputs.size(-1)), targets.view(-1))
            loss = loss.view(targets.size()) * loss_masks
            loss = loss.sum() / loss_masks.sum()
            loss = L2Wrap.apply(loss, outputs)

            wandb.log({"loss": loss.item()})

            optimizer.zero_grad()
            accelerator.backward(loss)
            optimizer.step()

        # Save once per epoch; unwrap in case accelerate wrapped the model.
        save_checkpoint(accelerator.unwrap_model(model), output_dir, epoch)

    wandb.finish()
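
# Note (the script is written for a single-process run): to launch on several GPUs
# with `accelerate launch pretrain.py ...`, the wandb.init/wandb.log and
# save_checkpoint calls would typically be restricted to accelerator.is_main_process.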

def save_checkpoint(model, output_dir, epoch):
    """
    Save a model checkpoint, keeping only the most recent one.

    Args:
        model: The model to save
        output_dir: Directory to save the checkpoint
        epoch: Current epoch number
    """
    # Remove any previous checkpoints so only the latest .pt file remains.
    pt_files = glob.glob(os.path.join(output_dir, "*.pt"))
    for pt_file in pt_files:
        os.remove(pt_file)

    checkpoint_path = os.path.join(output_dir, f"checkpoint_epoch_{epoch + 1}.pt")
    torch.save(model.state_dict(), checkpoint_path)

def main():
    """
    Example usage:

        dim=256
        blocks=12
        learning_rate=1e-5
        num_epochs=4000
        batch_size=128

        export HF_ENDPOINT="https://hf-mirror.com"
        python pretrain.py --dim $dim --n_blocks $blocks --learning_rate $learning_rate --num_epochs $num_epochs --batch_size $batch_size
    """
    import sys
    # Debugging defaults: only injected when the script is launched without CLI
    # arguments, so real command-line arguments are not overridden.
    if len(sys.argv) == 1:
        sys.argv += [
            '--dim', '256',
            '--n_blocks', '12',
            '--learning_rate', '1e-5',
            '--num_epochs', '4000',
            '--batch_size', '128',
        ]

    parser = argparse.ArgumentParser(description="Train RWKV7 model")
    parser.add_argument("--dim", type=int, default=128, help="Dimension of the model")
    parser.add_argument("--n_blocks", type=int, default=5, help="Number of blocks in the model")
    parser.add_argument("--num_epochs", type=int, default=4000, help="Number of training epochs")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate for optimizer")
    parser.add_argument("--batch_size", type=int, default=128, help="Batch size for training")
    args = parser.parse_args()

    checkpoint_dir = "./checkpoints"

    model = initialize_model(checkpoint_dir, args.dim, args.n_blocks)

    dataloader = prepare_dataloader(args.batch_size)

    train(model, dataloader, args.num_epochs, checkpoint_dir, args.learning_rate)

if __name__ == "__main__":
    main()