import os
import torch
import scipy.io.wavfile as wavfile
from tqdm import tqdm
from datasets import load_dataset
from src.rwkv7 import RWKV7
def load_latest_checkpoint(model, checkpoint_dir):
    """
    Load the most recently created checkpoint from ``checkpoint_dir`` into ``model``.

    Args:
        model: Model whose state dict is populated from the checkpoint.
        checkpoint_dir: Directory containing checkpoint files (``.pt``).

    Returns:
        int: 1 if a checkpoint was loaded, 0 if none was found.
    """
    checkpoint_files = [f for f in os.listdir(checkpoint_dir) if f.endswith('.pt')]
    if not checkpoint_files:
        print("No checkpoint files found in the directory.")
        return 0
    # Pick the newest file by creation time.
    latest_checkpoint = max(
        checkpoint_files,
        key=lambda x: os.path.getctime(os.path.join(checkpoint_dir, x)),
    )
    checkpoint_path = os.path.join(checkpoint_dir, latest_checkpoint)
    # map_location='cpu' makes loading succeed regardless of the device the
    # checkpoint was saved from; load_state_dict then copies the weights onto
    # whatever device the model's parameters already live on.
    model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'))
    print(f"Loaded checkpoint: {checkpoint_path}")
    # Previously fell off the end returning None, contradicting the docstring's
    # int contract; callers that ignore the return value are unaffected.
    return 1
def initialize_model():
    """
    Build the RWKV7 model on the GPU and restore the newest checkpoint.

    Returns:
        The RWKV7 model with weights loaded from ./checkpoints (if any exist).
    """
    # Same hyperparameters as training: 128-entry text vocab, 8192-entry
    # audio codebook plus one extra token, 128-dim, 5 blocks.
    rwkv = RWKV7(text_vocab=128, audio_vocab=8192 + 1, dim=128, n_blocks=5).cuda()
    load_latest_checkpoint(rwkv, "./checkpoints")
    return rwkv
def load_codec():
    """
    Fetch the FocalCodec audio codec via torch.hub and prepare it for inference.

    Returns:
        The codec in eval mode on the GPU with gradients disabled.
    """
    codec = torch.hub.load(
        "lucadellalib/focalcodec",
        "focalcodec",
        config="lucadellalib/focalcodec_12_5hz",
        force_reload=False,
    )
    # Inference only: freeze parameters and move to the GPU.
    codec.eval().requires_grad_(False)
    codec.to('cuda')
    return codec
def load_dataset_sample():
    """
    Fetch the first training sample of the tokenized JLSpeech dataset.

    Returns:
        One dataset row; contains at least 'normalized_text' and 'audio_token'.
    """
    sample = load_dataset(
        "JerryAGENDD/JLSpeech_tokenized", cache_dir="../temp_datasets"
    )['train'][0]
    print(f"Loaded sample with text: {sample['normalized_text']}")
    return sample
def generate_audio_tokens(model, initial_tokens, target_length, device="cuda"):
    """
    Autoregressively extend ``initial_tokens`` to ``target_length`` tokens.

    Args:
        model: The RWKV7 model; called as ``model(None, tokens_tensor)`` and
            expected to return logits of shape (1, seq_len, vocab).
        initial_tokens: List of seed tokens to start generation from (not
            modified).
        target_length: Total number of tokens in the returned sequence.
        device: Device for the token tensor. Defaults to "cuda", matching the
            previous hard-coded behavior.

    Returns:
        list: The seed tokens followed by greedily decoded tokens.
    """
    tokens = list(initial_tokens)  # copy so the caller's list is untouched
    tokens_tensor = torch.tensor(tokens).unsqueeze(0).to(device)
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        for _ in tqdm(range(len(initial_tokens), target_length)):
            outputs = model(None, tokens_tensor)
            # Greedy (argmax) decoding from the final position's logits.
            next_token = torch.argmax(outputs[0, -1, :]).item()
            tokens.append(next_token)
            # Append on-device instead of rebuilding the tensor from the whole
            # Python list each step (previously O(n^2) host->device copies).
            step = torch.tensor([[next_token]], device=tokens_tensor.device)
            tokens_tensor = torch.cat([tokens_tensor, step], dim=1)
    return tokens
def save_audio_file(signal, filename, sample_rate):
    """
    Save an audio signal tensor to a WAV file.

    Args:
        signal: Audio signal tensor (any device; moved to CPU before writing).
        filename: Output .wav path.
        sample_rate: Sample rate of the audio in Hz.
    """
    signal_array = signal.cpu().numpy()
    wavfile.write(filename, sample_rate, signal_array)
    # Bug fix: this previously printed the literal text "(unknown)" instead of
    # the destination path.
    print(f"Saved audio to {filename}")
def main():
    """End-to-end demo: decode a ground-truth sample and a model-generated clip."""
    model = initialize_model()
    codec = load_codec()

    sample = load_dataset_sample()

    # Seed generation with the first 10 ground-truth audio tokens and generate
    # a sequence of the same total length as the reference.
    reference_tokens = sample['audio_token']
    seed = reference_tokens[:10]

    print("Generating audio tokens...")
    predicted_tokens = generate_audio_tokens(model, seed, len(reference_tokens))

    # Batch dimension of 1 for the codec decoder.
    reference_batch = torch.tensor(reference_tokens).unsqueeze(0).cuda()
    predicted_batch = torch.tensor(predicted_tokens).unsqueeze(0).cuda()
    print(f"Target shape: {reference_batch.shape}")
    print(f"Generated shape: {predicted_batch.shape}")

    print("Decoding audio signals...")
    reference_wave = codec.toks_to_sig(reference_batch).squeeze(0)
    predicted_wave = codec.toks_to_sig(predicted_batch).squeeze(0)

    save_audio_file(reference_wave, 'output_target.wav', codec.sample_rate)
    save_audio_file(predicted_wave, 'output_model.wav', codec.sample_rate)
# Script entry point: run the full generation demo when executed directly.
if __name__ == "__main__":
    main()