# S2S / non_empty.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio  # audio loading / resampling
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from torch.utils.data import DataLoader, Dataset
from torch.optim import AdamW
from tqdm import tqdm
import numpy as np
import itertools
import glob
from torch.amp import autocast, GradScaler  # unified AMP API (GradScaler here requires torch >= 2.3)
import torch.utils.checkpoint  # gradient checkpointing used in the generator
# Constants and Utilities
LRELU_SLOPE = 0.1
SAMPLE_RATE = 16000  # target sample rate for training audio (16 kHz)
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
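
# Worked example of the "same"-padding rule above (a sketch): with kernel_size=3
# and dilation=2 the effective kernel spans 5 samples, get_padding returns 2,
# and a stride-1 conv keeps the time axis unchanged:
#   conv = Conv1d(8, 8, 3, 1, dilation=2, padding=get_padding(3, 2))
#   conv(torch.randn(1, 8, 100)).shape  # -> torch.Size([1, 8, 100])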
# Model Components
class ResBlock1(nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
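
# Quick shape sanity check (a sketch; ResBlock1 stores `h` but never reads it,
# so None is passed purely for illustration):
def _resblock1_smoke_test():
    block = ResBlock1(None, channels=32, kernel_size=3)
    x = torch.randn(2, 32, 100)        # (batch, channels, time)
    assert block(x).shape == x.shape   # every conv uses "same" padding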
class EmbeddingGenerator(nn.Module):
def __init__(self, h):
super().__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(h.embedding_dim, h.upsample_initial_channel, 7, 1, padding=3))
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i),
h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(ResBlock1(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
        x = self.conv_pre(x)  # input is already (B, embedding_dim, T); no transpose needed
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
            x = torch.utils.checkpoint.checkpoint(self.ups[i], x, use_reentrant=False)  # trade compute for memory
xs = None
for j in range(self.num_kernels):
                res_out = torch.utils.checkpoint.checkpoint(self.resblocks[i*self.num_kernels+j], x, use_reentrant=False)
xs = res_out if xs is None else xs + res_out
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
return torch.tanh(x)
def remove_weight_norm(self):
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
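
# End-to-end shape sketch: with the Hparams defined below, the upsample stages
# multiply the time axis by 10 * 8 * 4 * 1 = 320, so (B, embedding_dim, T)
# embeddings come out as roughly (B, 1, 320*T) audio; the stride-1 final stage
# adds one extra sample, which the training loop crops away.
def _generator_smoke_test():
    g = EmbeddingGenerator(Hparams())  # Hparams is defined further down
    emb = torch.randn(1, 1024, 10)     # (B, embedding_dim, T)
    wav = g(emb)                       # -> (1, 1, 3201)
    assert wav.shape[-1] >= 320 * 10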
class DiscriminatorP(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
b, c, t = x.shape
        # pad so the time axis divides evenly into the period
required_length = ((t + self.period - 1) // self.period) * self.period
n_pad = required_length - t
if n_pad != 0:
x = F.pad(x, (0, n_pad), "reflect")
x = x.view(b, c, required_length // self.period, self.period)
for conv in self.convs:
x = conv(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
return torch.flatten(x, 1, -1), fmap
class MultiPeriodDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
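        # prime periods (as in HiFi-GAN) so the 2-D views of the waveform
        # overlap as little as possible across sub-discriminators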
def forward(self, y, y_hat):
y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
for d in self.discriminators:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
y_d_gs.append(y_d_g)
fmap_rs.append(fmap_r)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(nn.Module):
def __init__(self, use_spectral_norm=False):
super().__init__()
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for conv in self.convs:
x = conv(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
return torch.flatten(x, 1, -1), fmap
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=2),
AvgPool1d(4, 2, padding=2)
])
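        # each later discriminator sees a 2x average-pooled (smoothed,
        # downsampled) signal, so the three operate at different time scales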
def forward(self, y, y_hat):
y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
y_d_gs.append(y_d_g)
fmap_rs.append(fmap_r)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
# Loss Functions
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss * 2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
loss += (torch.mean((1-dr)**2) + torch.mean(dg**2))
return loss
def generator_loss(disc_outputs):
loss = 0
for dg in disc_outputs:
loss += torch.mean((1-dg)**2)
return loss
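
# These are the least-squares (LSGAN) objectives used by HiFi-GAN: the
# discriminator pushes real outputs toward 1 and generated outputs toward 0,
# while the generator pushes its outputs toward 1 (a perfectly fooled
# discriminator gives dg == 1 and generator_loss -> 0). feature_loss is an L1
# match on intermediate discriminator activations, scaled by 2 as in HiFi-GAN.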
# Training Infrastructure
class Hparams:
def __init__(self):
self.embedding_dim = 1024
self.upsample_rates = [10, 8, 4, 1]
self.upsample_kernel_sizes = [20, 16, 8, 4]
self.upsample_initial_channel = 256
        # NOTE: these two lists are zipped per upsample stage, so their lengths
        # must match (the original had three dilation lists for two kernel
        # sizes; zip() silently dropped the third).
        self.resblock_kernel_sizes = [3, 7]
        self.resblock_dilation_sizes = [[1, 3, 5], [1, 3, 5]]
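
# Sanity notes on the hyperparameters above: the upsample rates multiply to
# 10 * 8 * 4 * 1 = 320, matching the dataset's 320-samples-per-embedding-step
# hop (16 kHz audio at 50 embeddings/s). The final stage (k=4, u=1, padding=1)
# emits one extra sample, which the training loop crops to the target length.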
# Dataset
class EmbeddingAudioDataset(Dataset):
    def __init__(self, embedding_files, audio_files, max_length=SAMPLE_RATE * 3):
        self.embedding_files = embedding_files
        self.audio_files = audio_files
        self.max_length = max_length
def __len__(self):
return len(self.embedding_files)
def __getitem__(self, idx):
# Load embedding and convert to tensor immediately
embedding = np.load(self.embedding_files[idx], allow_pickle=True)
if embedding.ndim == 1:
embedding = embedding.reshape(1, -1)
embedding = torch.from_numpy(embedding).float() # Convert to tensor here
# Load and process audio
waveform, orig_sr = torchaudio.load(self.audio_files[idx])
if waveform.shape[0] > 1:
waveform = torch.mean(waveform, dim=0, keepdim=True)
        if orig_sr != SAMPLE_RATE:
            # resample using the file's actual sample rate
            waveform = torchaudio.functional.resample(waveform, orig_sr, SAMPLE_RATE)
# Audio truncation/padding
if waveform.shape[1] > self.max_length:
start = torch.randint(0, waveform.shape[1] - self.max_length, (1,))
waveform = waveform[:, start:start+self.max_length]
else:
waveform = F.pad(waveform, (0, self.max_length - waveform.shape[1]))
# Embedding truncation/padding
emb_len = self.max_length // 320
if embedding.shape[0] > emb_len:
embedding = embedding[:emb_len]
else:
# Pad time dimension (axis 0) with zeros
embedding = F.pad(embedding, (0, 0, 0, emb_len - embedding.shape[0]))
return embedding, waveform.squeeze().float()
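
# Usage sketch (paths are placeholders): with the default 3 s max_length every
# item has a fixed size, which the default DataLoader collate can stack.
# Assuming 1024-dim embeddings (h.embedding_dim):
#   ds = EmbeddingAudioDataset(["utt.npy"], ["utt.wav"])
#   emb, wav = ds[0]   # emb: (150, 1024) float32, wav: (48000,) float32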
if __name__ == "__main__":
h = Hparams()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Initialize models
generator = EmbeddingGenerator(h).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiScaleDiscriminator().to(device)
# Optimizers
optim_g = AdamW(generator.parameters(), lr=0.0002, betas=(0.8, 0.99))
optim_d = AdamW(itertools.chain(mpd.parameters(), msd.parameters()),
lr=0.0002, betas=(0.8, 0.99))
    scaler_g = GradScaler("cuda")
    scaler_d = GradScaler("cuda")
# Dataset
embedding_files = sorted(glob.glob('/home/vikrant/Conversational-AI-Model/embedding_vocoder/embeddings/*.npy'))
audio_files = sorted(glob.glob('/home/vikrant/Conversational-AI-Model/embedding_vocoder/non_empty_wavs/*.wav'))
    assert len(embedding_files) == len(audio_files), "Mismatched embedding/audio file counts"
loader = DataLoader(
EmbeddingAudioDataset(embedding_files, audio_files),
        batch_size=8, shuffle=True, num_workers=4, persistent_workers=False, pin_memory=False,
)
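
    # Every item is padded/cropped to a fixed 3 s length, so the default
    # collate_fn stacks batches directly: embeddings arrive as (B, 150, D) and
    # audio as (B, 48000); the loop below permutes embeddings to (B, D, T).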
    # Training parameters
    num_epochs = 100
    start_epoch = 0  # epochs are 0-indexed; overwritten below when resuming
# Load checkpoint if resuming
checkpoint_path = None # Set to path if resuming
if checkpoint_path:
        checkpoint = torch.load(checkpoint_path, map_location=device)
generator.load_state_dict(checkpoint['generator'])
mpd.load_state_dict(checkpoint['mpd'])
msd.load_state_dict(checkpoint['msd'])
optim_g.load_state_dict(checkpoint['optim_g'])
optim_d.load_state_dict(checkpoint['optim_d'])
scaler_g.load_state_dict(checkpoint['scaler_g'])
scaler_d.load_state_dict(checkpoint['scaler_d'])
start_epoch = checkpoint['epoch'] + 1
for epoch in range(start_epoch, num_epochs):
generator.train()
mpd.train()
msd.train()
for batch_idx, (embeddings, audio) in enumerate(tqdm(loader)):
embeddings, audio = embeddings.to(device), audio.to(device)
audio = audio.unsqueeze(1)
            # (B, T, D) -> (B, D, T), the layout the generator's Conv1d expects
            if embeddings.ndim == 2:  # safety net for an unbatched sample
                embeddings = embeddings.unsqueeze(0).permute(0, 2, 1)
            else:
                embeddings = embeddings.permute(0, 2, 1)
# --- Mixed precision forward ---
            with autocast("cuda"):
fake_audio = generator(embeddings)
target_len = audio.size(2)
fake_audio = fake_audio[:, :, :target_len] if fake_audio.size(2) > target_len \
else F.pad(fake_audio, (0, target_len - fake_audio.size(2)))
# --- Discriminator update ---
optim_d.zero_grad()
            with autocast("cuda"):
y_d_rs_mpd, y_d_gs_mpd, _, _ = mpd(audio, fake_audio.detach())
y_d_rs_msd, y_d_gs_msd, _, _ = msd(audio, fake_audio.detach())
loss_disc = discriminator_loss(y_d_rs_mpd + y_d_rs_msd, y_d_gs_mpd + y_d_gs_msd)
scaler_d.scale(loss_disc).backward()
scaler_d.step(optim_d)
scaler_d.update()
# --- Generator update ---
optim_g.zero_grad()
            with autocast("cuda"):
y_d_gs_mpd, _, fmap_rs_mpd, fmap_gs_mpd = mpd(audio, fake_audio)
y_d_gs_msd, _, fmap_rs_msd, fmap_gs_msd = msd(audio, fake_audio)
loss_gen = generator_loss(y_d_gs_mpd + y_d_gs_msd)
loss_feat = feature_loss(fmap_rs_mpd + fmap_rs_msd, fmap_gs_mpd + fmap_gs_msd)
total_loss = loss_gen + loss_feat
scaler_g.scale(total_loss).backward()
scaler_g.step(optim_g)
scaler_g.update()
            if batch_idx % 100 == 0:
                print(f"Epoch {epoch} Batch {batch_idx} | G Loss: {loss_gen.item():.3f} | D Loss: {loss_disc.item():.3f}")
                if device.type == "cuda":
                    print(f"Memory allocated: {torch.cuda.memory_allocated()/1e9:.2f}GB")
                    print(f"Memory reserved: {torch.cuda.memory_reserved()/1e9:.2f}GB")
# Memory cleanup
torch.cuda.empty_cache()
# Save checkpoint
torch.save({
'generator': generator.state_dict(),
'mpd': mpd.state_dict(),
'msd': msd.state_dict(),
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'scaler_g': scaler_g.state_dict(),
'scaler_d': scaler_d.state_dict(),
'epoch': epoch,
'hparams': h.__dict__,
}, f"checkpoint_epoch_{epoch}.pt")
# Finalize
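    # Weight norm stores each weight as a (g, v) pair; remove_weight_norm folds
    # them back into a single tensor so the exported state_dict loads as plain
    # Conv1d/ConvTranspose1d weights, with no weight_norm re-application needed.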
generator.eval().cpu()
generator.remove_weight_norm()
torch.save(generator.state_dict(), "final_generator.pth")
print("Training completed")