import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import os
import glob
import wandb
from datasets import load_dataset
from accelerate import Accelerator
import argparse
from src.rwkv7 import RWKV7
from src.dataset import MyDataset
from src.transformer import TransformerModel


class L2Wrap(torch.autograd.Function):
    @staticmethod
    def forward(ctx, loss, y):
        ctx.save_for_backward(y)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        y = ctx.saved_tensors[0]
        factor = 1e-4 / (y.shape[0] * y.shape[1])
        maxx, ids = torch.max(y, -1, keepdim=True)
        gy = torch.zeros_like(y)
        gy.scatter_(-1, ids, maxx * factor)
        return grad_output, gy
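
# Illustrative note (a sketch of what the class above does, beyond the single call
# in train() below): L2Wrap returns the loss unchanged in the forward pass, but in
# the backward pass it adds a small extra gradient of
#     (1e-4 / (B * T)) * max_logit
# at the arg-max position of each (batch, time) step, e.g.
#     loss = L2Wrap.apply(loss, logits)   # logits: (B, T, vocab)
# which gently pulls the largest logits toward zero and keeps them from drifting.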


def load_latest_checkpoint(model, checkpoint_dir):
    """
    Load the latest checkpoint for the model from the specified directory.

    Args:
        model: The model to load the checkpoint into
        checkpoint_dir: Directory containing checkpoint files (.pt)
    """
    # Guard against a missing directory (e.g. on the very first run, before
    # train() has created it).
    if not os.path.isdir(checkpoint_dir):
        print("Checkpoint directory does not exist; starting from scratch.")
        return 0
    checkpoint_files = [f for f in os.listdir(checkpoint_dir) if f.endswith('.pt')]
    if not checkpoint_files:
        print("No checkpoint files found in the directory.")
        return 0
    latest_checkpoint = max(checkpoint_files, key=lambda x: os.path.getctime(os.path.join(checkpoint_dir, x)))
    checkpoint_path = os.path.join(checkpoint_dir, latest_checkpoint)
    model.load_state_dict(torch.load(checkpoint_path))
    print(f"Loaded checkpoint: {checkpoint_path}")


def initialize_model(checkpoint_dir, dim, n_blocks):
    """
    Initialize the model and load the latest checkpoint.

    Args:
        checkpoint_dir: Directory containing checkpoint files
        dim: Dimension of the model
        n_blocks: Number of blocks in the model
    Returns:
        The initialized model
    """
    # Two interchangeable backbones; the Transformer variant is the one in use here.
    # model = RWKV7(text_vocab=128, audio_vocab=8192 + 1, dim=dim, n_blocks=n_blocks).cuda()
    model = TransformerModel(text_vocab=128, audio_vocab=8192 + 1, dim=dim, n_blocks=n_blocks).cuda()

    # Load latest checkpoint
    load_latest_checkpoint(model, checkpoint_dir)

    # Print model statistics
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Model total parameters: {total_params}")
    print(f"Model trainable parameters: {trainable_params}")

    return model
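
# Example usage (sketch; the values mirror the defaults appended in main() below):
#     model = initialize_model("./checkpoints", dim=256, n_blocks=12)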


def collate_fn(batch):
    """
    Custom collate function for DataLoader to handle variable-length sequences.

    Args:
        batch: A batch of sequences
    Returns:
        Tuple of (input_ids, targets, loss_masks)
    """
    padding_token = 8192
    max_length = max(len(seq) for seq in batch) - 1  # Max length excluding the last token

    input_ids = []
    targets = []
    loss_masks = []

    for seq in batch:
        input_seq = list(seq[:-1])  # Input sequence, excluding the last token
        target_seq = list(seq[1:])  # Target sequence, starting from the second token

        input_padding = [padding_token] * (max_length - len(input_seq))
        target_padding = [padding_token] * (max_length - len(target_seq))
        mask_padding = [0] * (max_length - len(input_seq))

        input_ids.append(torch.tensor(input_seq + input_padding, dtype=torch.long))
        targets.append(torch.tensor(target_seq + target_padding, dtype=torch.long))
        loss_masks.append(torch.tensor([1] * len(input_seq) + mask_padding, dtype=torch.long))

    return torch.stack(input_ids, dim=0), torch.stack(targets, dim=0), torch.stack(loss_masks, dim=0)
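
# Worked example (illustrative): for a batch of two tokenized sequences
#     [t1, t2, t3, t4] and [t5, t6]
# max_length is 3 and, with padding_token = 8192, the collate function yields
#     input_ids  = [[t1, t2, t3], [t5, 8192, 8192]]
#     targets    = [[t2, t3, t4], [t6, 8192, 8192]]
#     loss_masks = [[ 1,  1,  1], [ 1,    0,    0]]
# so the loss in train() is averaged only over real (unpadded) positions.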


def prepare_dataloader(batch_size):
    """
    Prepare dataset and dataloader.

    Args:
        batch_size: Batch size for training
    Returns:
        DataLoader for training
    """
    # Load dataset
    # dataset = load_dataset("JerryAGENDD/JLSpeech_tokenized", cache_dir="../temp_datasets")['train']
    dataset = load_dataset("./JLSpeech_tokenized")['train']
    dataset = MyDataset(hf_dataset=dataset, train_type='pretrain')

    # Create dataloader
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate_fn
    )
    return dataloader


def train(model, dataloader, num_epochs, output_dir, learning_rate):
    """
    Train the model.

    Args:
        model: The model to train
        dataloader: DataLoader for training data
        num_epochs: Number of training epochs
        output_dir: Directory to save checkpoints
        learning_rate: Learning rate for optimizer
    """
    # Set up accelerator and optimizer
    accelerator = Accelerator()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
    model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

    # Initialize wandb
    wandb.init(project="TTS")

    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    # Per-token loss; the reduction is done manually with the loss masks below
    criterion = torch.nn.CrossEntropyLoss(reduction='none')

    # Training loop
    model.train()
    for epoch in tqdm(range(num_epochs)):
        for batch in tqdm(dataloader, leave=False):
            input_ids, targets, loss_masks = batch
            input_ids = input_ids.long().to('cuda')
            targets = targets.long().to('cuda')
            loss_masks = loss_masks.to('cuda')

            # Forward pass
            outputs = model(None, None, input_ids)

            # Calculate per-token loss
            loss = criterion(outputs.view(-1, outputs.size(-1)), targets.view(-1))

            # Apply loss masks and average over unpadded positions
            loss = loss.view(targets.size()) * loss_masks
            loss = loss.sum() / loss_masks.sum()
            loss = L2Wrap.apply(loss, outputs)

            # Log to wandb
            wandb.log({"loss": loss.item()})

            # Backward pass and optimization
            optimizer.zero_grad()
            accelerator.backward(loss)
            optimizer.step()

        # Save checkpoint at the end of each epoch
        save_checkpoint(model, output_dir, epoch)

    # Finish the wandb run
    wandb.finish()
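
# Note: training goes through Accelerator, so the script can be started either with
# plain `python pretrain.py ...` (as in the usage block in main() below) or with
# `accelerate launch pretrain.py ...`; the explicit .cuda()/.to('cuda') calls above
# assume a CUDA device is available.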


def save_checkpoint(model, output_dir, epoch):
    """
    Save a model checkpoint.

    Args:
        model: The model to save
        output_dir: Directory to save the checkpoint
        epoch: Current epoch number
    """
    # Delete all existing checkpoint files
    pt_files = glob.glob(os.path.join(output_dir, "*.pt"))
    for pt_file in pt_files:
        os.remove(pt_file)

    # Save current checkpoint
    checkpoint_path = os.path.join(output_dir, f"checkpoint_epoch_{epoch + 1}.pt")
    torch.save(model.state_dict(), checkpoint_path)
    # print(f"Saved checkpoint to {checkpoint_path}")


def main():
    """
    Example launch (shell):
        dim=256
        blocks=12
        learning_rate=1e-5
        num_epochs=4000
        batch_size=128
        export HF_ENDPOINT="https://hf-mirror.com"
        python pretrain.py --dim $dim --n_blocks $blocks --learning_rate $learning_rate --num_epochs $num_epochs --batch_size $batch_size
    """
    # Hard-coded arguments appended for convenience while debugging; because argparse
    # keeps the last occurrence of an option, these override anything passed on the
    # command line. Remove this block to use real command-line arguments.
    import sys
    sys.argv.append('--dim')
    sys.argv.append('256')
    sys.argv.append('--n_blocks')
    sys.argv.append('12')
    sys.argv.append('--learning_rate')
    sys.argv.append('1e-5')
    sys.argv.append('--num_epochs')
    sys.argv.append('4000')
    sys.argv.append('--batch_size')
    sys.argv.append('128')

    parser = argparse.ArgumentParser(description="Pretrain the TTS model")
    parser.add_argument("--dim", type=int, default=128, help="Dimension of the model")
    parser.add_argument("--n_blocks", type=int, default=5, help="Number of blocks in the model")
    parser.add_argument("--num_epochs", type=int, default=4000, help="Number of training epochs")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate for optimizer")
    parser.add_argument("--batch_size", type=int, default=128, help="Batch size for training")
    args = parser.parse_args()

    # Configuration
    checkpoint_dir = "./checkpoints"

    # Initialize model
    model = initialize_model(checkpoint_dir, args.dim, args.n_blocks)

    # Prepare dataloader
    dataloader = prepare_dataloader(args.batch_size)

    # Train model
    train(model, dataloader, args.num_epochs, checkpoint_dir, args.learning_rate)


if __name__ == "__main__":
    main()