# Uploaded with huggingface_hub (revision 256e3e6)
"""Audio player for LLM-generated speech tokens"""
import io
import numpy as np
import soundfile as sf
import torch
from nemo.collections.tts.models import AudioCodecModel
from scipy.signal import resample_poly
from config import (
TOKENIZER_LENGTH, START_OF_TEXT, END_OF_TEXT,
START_OF_SPEECH, END_OF_SPEECH, START_OF_HUMAN, END_OF_HUMAN,
START_OF_AI, END_OF_AI, PAD_TOKEN, AUDIO_TOKENS_START, CODEBOOK_SIZE,
CODEC_MODEL_NAME, NUM_CODEBOOKS, SAMPLE_RATE, REF_AUDIO_SECONDS,
)
class LLMAudioPlayer:
    """Decode LLM-generated speech token streams into audio waveforms.

    The language model emits a flat token sequence containing special
    delimiter tokens (start/end of text, speech, human, AI) plus audio
    codec tokens that are offset per codebook. This class locates the
    speech span, undoes the codebook offsets, and runs a NeMo audio codec
    to reconstruct the waveform. It can also encode a reference audio
    clip back into the same flattened token layout for prompting.
    """

    def __init__(self, tokenizer) -> None:
        """Load the codec model and cache tokenizer/config constants.

        Args:
            tokenizer: HF-style tokenizer used to decode the text span.
        """
        self.nemo_codec_model = AudioCodecModel\
            .from_pretrained(CODEC_MODEL_NAME).eval()
        # Pick the fastest available device: CUDA > Apple MPS > CPU.
        if torch.cuda.is_available():
            self.device = 'cuda'
        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
            self.device = 'mps'
        else:
            self.device = 'cpu'
        self.nemo_codec_model.to(self.device)
        # NOTE: torch.compile disabled for codec due to varying input shapes causing recompilations
        # The codec is called with different frame counts, which triggers too many recompilations
        self.tokenizer = tokenizer
        # Mirror config constants as attributes for cheap repeated access.
        self.tokenizer_length = TOKENIZER_LENGTH
        self.start_of_text = START_OF_TEXT
        self.end_of_text = END_OF_TEXT
        self.start_of_speech = START_OF_SPEECH
        self.end_of_speech = END_OF_SPEECH
        self.start_of_human = START_OF_HUMAN
        self.end_of_human = END_OF_HUMAN
        self.start_of_ai = START_OF_AI
        self.end_of_ai = END_OF_AI
        self.pad_token = PAD_TOKEN
        self.audio_tokens_start = AUDIO_TOKENS_START
        self.codebook_size = CODEBOOK_SIZE
        self.num_codebooks = NUM_CODEBOOKS
        self.sample_rate = SAMPLE_RATE

    def output_validation(self, out_ids):
        """Raise ValueError unless both speech delimiter tokens are present."""
        has_start = self.start_of_speech in out_ids
        has_end = self.end_of_speech in out_ids
        if not (has_start and has_end):
            raise ValueError('Special speech tokens do not exist!')

    def get_nano_codes(self, out_ids):
        """Extract and de-offset the codec tokens between the speech markers.

        Args:
            out_ids: 1-D integer tensor of generated token ids.

        Returns:
            Tuple of (audio_codes, len_): codes with shape
            (1, num_codebooks, num_frames) and a 1-element length tensor.

        Raises:
            ValueError: on a malformed or out-of-range speech span.
        """
        start_positions = (out_ids == self.start_of_speech).nonzero(as_tuple=True)[0]
        end_positions = (out_ids == self.end_of_speech).nonzero(as_tuple=True)[0]
        # Use the first start and the last end marker; calling .item()
        # directly on the nonzero() result crashes when a marker occurs
        # more than once in the sequence.
        start_a_idx = start_positions[0].item()
        end_a_idx = end_positions[-1].item()
        if start_a_idx >= end_a_idx:
            raise ValueError('Invalid audio codes sequence!')
        audio_codes = out_ids[start_a_idx + 1 : end_a_idx]
        if len(audio_codes) % self.num_codebooks:
            raise ValueError(f'The length of the sequence must be a multiple of {self.num_codebooks}!')
        audio_codes = audio_codes.reshape(-1, self.num_codebooks)
        # Codebook i is shifted by codebook_size * i on top of the global
        # audio_tokens_start offset; undo both. Build the offsets on the
        # same device as the input to avoid a device mismatch.
        offsets = torch.arange(self.num_codebooks, device=audio_codes.device) * self.codebook_size
        audio_codes = audio_codes - offsets - self.audio_tokens_start
        if (audio_codes < 0).any():
            raise ValueError('Invalid audio tokens!')
        # (num_frames, num_codebooks) -> (1, num_codebooks, num_frames)
        audio_codes = audio_codes.T.unsqueeze(0)
        len_ = torch.tensor([audio_codes.shape[-1]])
        return audio_codes, len_

    def get_text(self, out_ids):
        """Decode the text span between the text markers, or None if absent."""
        try:
            start_t_idx = (out_ids == self.start_of_text).tolist().index(True)
            end_t_idx = (out_ids == self.end_of_text).tolist().index(True)
            # The slice includes the end marker; skip_special_tokens drops
            # it (and any other specials) during decoding.
            txt_tokens = out_ids[start_t_idx : end_t_idx + 1]
            return self.tokenizer.decode(txt_tokens, skip_special_tokens=True)
        except ValueError:
            # .index(True) raises ValueError when a marker is missing.
            return None

    def get_waveform(self, out_ids):
        """Decode a full generation into (waveform ndarray, text or None).

        Raises:
            ValueError: if the speech span is missing or malformed.
        """
        out_ids = out_ids.flatten()
        self.output_validation(out_ids)
        audio_codes, len_ = self.get_nano_codes(out_ids)
        audio_codes, len_ = audio_codes.to(self.device), len_.to(self.device)
        with torch.inference_mode():
            reconstructed_audio, _ = self.nemo_codec_model.decode(tokens=audio_codes, tokens_len=len_)
            output_audio = reconstructed_audio.cpu().detach().numpy().squeeze()
        text = self.get_text(out_ids)
        return output_audio, text

    def decode_audio_chunk(self, audio_codes):
        """Decode a chunk of audio codes (shape: [num_frames, num_codebooks]).

        Returns the waveform as a numpy array, or None for empty input or
        out-of-range tokens (streaming callers simply skip the chunk).
        """
        if len(audio_codes) == 0:
            return None
        # Undo per-codebook and global offsets (same layout as get_nano_codes).
        # as_tensor avoids a copy when the input is already a tensor.
        audio_codes = torch.as_tensor(audio_codes, device=self.device)
        offsets = torch.arange(self.num_codebooks, device=self.device) * self.codebook_size
        audio_codes = audio_codes - offsets - self.audio_tokens_start
        if (audio_codes < 0).any():
            return None  # Invalid tokens, skip
        # Shape: (1, num_codebooks, num_frames) - batch_size=1
        audio_codes = audio_codes.T.unsqueeze(0)
        len_ = torch.tensor([audio_codes.shape[-1]], device=self.device)
        with torch.inference_mode():
            reconstructed_audio, _ = self.nemo_codec_model.decode(tokens=audio_codes, tokens_len=len_)
            output_audio = reconstructed_audio.cpu().detach().numpy().squeeze()
        return output_audio

    def _normalize_reference_audio(self, audio, sample_rate, ref_seconds):
        """Downmix, resample, and truncate reference audio to float32 mono."""
        if audio.ndim == 2:
            # soundfile returns (samples, channels); average down to mono.
            audio = audio.mean(axis=1)
        if sample_rate != self.sample_rate:
            audio = resample_poly(audio, self.sample_rate, sample_rate)
        max_samples = int(round(self.sample_rate * ref_seconds))
        return np.asarray(audio[:max_samples], dtype=np.float32)

    def _flatten_reference_codes(self, ref_tokens):
        """Flatten encoder output (1, num_codebooks, frames) into LLM token ids.

        Re-applies the per-codebook and global offsets and drops frames that
        exactly repeat their predecessor to shorten the prompt.
        """
        codes = ref_tokens[0].detach().cpu().numpy().T  # (frames, num_codebooks)
        offsets = np.array([self.codebook_size * i for i in range(self.num_codebooks)])
        codes = codes + offsets
        if len(codes) > 1:
            # Keep frame 0 plus every frame that differs from its predecessor.
            frame_changed = np.any(codes[1:] != codes[:-1], axis=1)
            keep = np.insert(frame_changed, 0, True)
            codes = codes[keep]
        return (codes + self.audio_tokens_start).flatten().tolist()

    def prepare_reference_audio_tokens(self, reference_audio_path=None, reference_audio_bytes=None, ref_seconds=None):
        """Encode reference audio into flattened token ids for prompting.

        Exactly one of the two audio sources must be provided.

        Args:
            reference_audio_path: path to an audio file readable by soundfile.
            reference_audio_bytes: raw audio file bytes.
            ref_seconds: max seconds of audio to keep (default: REF_AUDIO_SECONDS).

        Returns:
            (token_ids, token_lengths): flattened ids and the codec's frame
            counts as a list.

        Raises:
            ValueError: if zero or both audio sources are given.
        """
        if ref_seconds is None:
            # Late-bind the config default so it is read at call time
            # rather than frozen at class-definition time.
            ref_seconds = REF_AUDIO_SECONDS
        if bool(reference_audio_path) == bool(reference_audio_bytes):
            raise ValueError("Provide exactly one of reference_audio_path or reference_audio_bytes")
        if reference_audio_path:
            audio, sample_rate = sf.read(reference_audio_path)
        else:
            audio, sample_rate = sf.read(io.BytesIO(reference_audio_bytes))
        audio = self._normalize_reference_audio(audio, sample_rate, ref_seconds)
        audio_tensor = torch.tensor(audio, dtype=torch.float32, device=self.device).unsqueeze(0)
        audio_len = torch.tensor([audio_tensor.shape[1]], dtype=torch.long, device=self.device)
        with torch.inference_mode():
            ref_tokens, ref_tokens_len = self.nemo_codec_model.encode(audio=audio_tensor, audio_len=audio_len)
        return self._flatten_reference_codes(ref_tokens), ref_tokens_len.tolist()