"""
V6 Inference — encoder-decoder TTS with MioCodec + speaker cloning
===================================================================
1. Encode text with encoder (bidirectional, once)
2. Autoregressively decode audio tokens with decoder + speaker embedding
3. Decode tokens with MioCodec using global_embedding
"""
import torch
import argparse
import time
from pathlib import Path
from config import (
AUDIO_OFFSET, NUM_AUDIO_TOKENS, END_OF_SPEECH_TOKEN_ID,
START_OF_SPEECH_TOKEN_ID, CODEC_SAMPLE_RATE, CODEC_FRAME_RATE,
)
from tokenizer import TTSTokenizer
from codec import CodecV6
from model import load_for_inference
@torch.no_grad()
def generate(model, tokenizer, text, speaker_emb,
             max_new_tokens=512, temperature=0.7, top_k=250,
             top_p=0.95, rep_penalty=1.1, device="cuda"):
    """
    Generate audio tokens from text.
    Args:
        model: TTSEncoderDecoder
        tokenizer: TTSTokenizer
        text: input text string
        speaker_emb: [128] MioCodec global_embedding
        max_new_tokens: max decoder steps
        temperature: sampling temperature (must be > 0)
        top_k: top-k filtering (0 disables)
        top_p: nucleus sampling threshold (1.0 disables)
        rep_penalty: repetition penalty on the last 100 generated tokens
        device: cuda/cpu
    Returns:
        torch.Tensor of MioCodec codes [num_frames], or None if nothing
        was generated before <end_of_speech>.
    """
    # 1. Encode text once — the encoder is bidirectional, no caching needed.
    enc_ids = tokenizer.build_encoder_input(text).unsqueeze(0).to(device)
    enc_mask = torch.ones_like(enc_ids)
    enc_out = model.encode(enc_ids, enc_mask)  # [1, T_enc, d_model]
    # 2. Speaker embedding -> batch of 1.
    spk = speaker_emb.unsqueeze(0).to(device)  # [1, 128]
    # 3. Autoregressive decode, starting from <start_of_speech>.
    dec_ids = torch.tensor([[START_OF_SPEECH_TOKEN_ID]], device=device)
    past = None
    generated_tokens = []
    for _ in range(max_new_tokens):
        # With a KV-cache only the newest token is fed after the first step;
        # the decoder re-adds the speaker embedding on every call.
        inp = dec_ids[:, -1:] if past is not None else dec_ids
        dec_out = model.decoder(
            input_ids=inp,
            encoder_output=enc_out,
            encoder_mask=enc_mask,
            speaker_emb=spk,
            past_key_values=past,
            use_cache=True,
        )
        past = dec_out["past_key_values"]
        logits = dec_out["logits"][:, -1, :]
        # Restrict the vocabulary to audio tokens + <end_of_speech>.
        mask = torch.full_like(logits, float("-inf"))
        mask[:, AUDIO_OFFSET:AUDIO_OFFSET + NUM_AUDIO_TOKENS] = 0
        mask[:, END_OF_SPEECH_TOKEN_ID] = 0
        logits = logits + mask
        # Repetition penalty (CTRL-style): positive logits are divided,
        # negative logits are MULTIPLIED. Dividing a negative logit would
        # shrink it toward zero and *raise* its probability — i.e. reward
        # repetition instead of punishing it.
        if rep_penalty != 1.0 and generated_tokens:
            recent = set(generated_tokens[-100:])
            for tid in recent:
                if AUDIO_OFFSET <= tid < AUDIO_OFFSET + NUM_AUDIO_TOKENS:
                    val = logits[:, tid]
                    logits[:, tid] = torch.where(
                        val > 0, val / rep_penalty, val * rep_penalty
                    )
        logits = logits / temperature
        # Top-k: drop everything below the k-th largest logit.
        if top_k > 0:
            kth = torch.topk(logits, min(top_k, logits.shape[-1])).values[:, -1:]
            logits[logits < kth] = float("-inf")
        # Top-p (nucleus): keep the smallest set of tokens whose cumulative
        # probability exceeds top_p; the shift keeps the first token past
        # the threshold so at least one candidate always survives.
        if top_p < 1.0:
            sorted_l, sorted_i = torch.sort(logits, descending=True)
            cum = torch.cumsum(torch.softmax(sorted_l, -1), -1)
            remove = cum > top_p
            remove[:, 1:] = remove[:, :-1].clone()
            remove[:, 0] = False
            # Scatter the sorted-order mask back to vocabulary order.
            logits[remove.scatter(1, sorted_i, remove)] = float("-inf")
        next_tok = torch.multinomial(torch.softmax(logits, -1), 1)
        tok_id = next_tok.item()
        if tok_id == END_OF_SPEECH_TOKEN_ID:
            break
        generated_tokens.append(tok_id)
        dec_ids = torch.cat([dec_ids, next_tok], dim=-1)
    if not generated_tokens:
        return None
    result = torch.tensor(generated_tokens, dtype=torch.long)
    # Defensive filter: keep only audio-range tokens, then strip the offset
    # so the codec receives raw MioCodec code indices.
    audio_mask = (result >= AUDIO_OFFSET) & (result < AUDIO_OFFSET + NUM_AUDIO_TOKENS)
    return result[audio_mask] - AUDIO_OFFSET
def synthesize(checkpoint, text, output="output.wav",
               speaker_wav=None, speaker_emb_path=None,
               temperature=0.7, top_k=250, top_p=0.95,
               rep_penalty=1.1, max_tokens=512, device="cuda"):
    """
    Full TTS pipeline: text → audio file.

    Speaker can be provided as:
        1. speaker_wav: path to reference audio (encoded with MioCodec)
        2. speaker_emb_path: path to a saved .pt/.npy embedding

    Returns the decoded waveform, or None if no audio was generated.

    Raises:
        ValueError: if neither speaker source is given, or a loaded
            embedding file contains no usable embedding.
    """
    print(f"'{text[:80]}' | T={temperature}")
    model = load_for_inference(checkpoint, device=device)
    tokenizer = TTSTokenizer()
    codec = CodecV6(device=device)
    # Resolve the speaker embedding (preset file takes precedence).
    if speaker_emb_path:
        import numpy as np
        if speaker_emb_path.endswith('.npy'):
            speaker_emb = torch.from_numpy(np.load(speaker_emb_path)).to(device)
        else:
            # SECURITY: weights_only=False unpickles arbitrary objects —
            # only load embedding files from trusted sources.
            speaker_emb = torch.load(speaker_emb_path, map_location=device, weights_only=False)
            if isinstance(speaker_emb, dict):
                speaker_emb = speaker_emb.get("global_embedding",
                                              speaker_emb.get("embedding"))
                if speaker_emb is None:
                    # Fail loudly here instead of with an opaque
                    # AttributeError further down the pipeline.
                    raise ValueError(
                        f"No 'global_embedding' or 'embedding' key in {speaker_emb_path}"
                    )
        if speaker_emb.dim() > 1:
            speaker_emb = speaker_emb.squeeze()  # collapse [1, 128] → [128]
        print(f"Speaker from preset: {speaker_emb.shape}")
    elif speaker_wav:
        result = codec.encode(speaker_wav)
        speaker_emb = result['global_embedding'].to(device)
        print(f"Speaker from wav: {speaker_wav}")
    else:
        raise ValueError("Provide speaker_wav or speaker_emb_path")
    # Generate audio tokens and time the decode loop for the RTF report.
    t0 = time.time()
    codes = generate(model, tokenizer, text, speaker_emb, max_tokens,
                     temperature, top_k, top_p, rep_penalty, device)
    gen_time = time.time() - t0
    if codes is None or len(codes) == 0:
        print("No audio generated!")
        return
    audio_dur = len(codes) / CODEC_FRAME_RATE
    # RTF < 1 means generation is faster than real time.
    rtf = gen_time / audio_dur if audio_dur > 0 else float('inf')
    print(f"{len(codes)} tokens ({audio_dur:.1f}s audio, {gen_time:.2f}s gen, RTF={rtf:.3f})")
    # Decode tokens to a waveform, conditioned on the same speaker embedding.
    wav = codec.tokens_to_wav(codes, speaker_emb, output)
    print(f"Saved: {output} ({len(wav)/CODEC_SAMPLE_RATE:.2f}s)")
    return wav
def main():
    """CLI entry point: parse arguments and run the synthesis pipeline."""
    p = argparse.ArgumentParser(description="V6 TTS Inference")
    p.add_argument("--checkpoint", required=True)
    p.add_argument("--text", required=True)
    p.add_argument("--output", default="output.wav")
    p.add_argument("--speaker-wav", help="Reference audio for voice cloning")
    p.add_argument("--speaker-emb", help="Path to saved speaker embedding .pt")
    p.add_argument("--temperature", type=float, default=0.7)
    p.add_argument("--top-k", type=int, default=250)
    p.add_argument("--top-p", type=float, default=0.95)
    p.add_argument("--rep-penalty", type=float, default=1.1)
    p.add_argument("--max-tokens", type=int, default=512)
    # synthesize() already accepts a device; expose it on the CLI so CPU
    # (or a specific GPU) can be selected without editing code.
    p.add_argument("--device", default="cuda",
                   help="Torch device for inference (e.g. cuda, cuda:1, cpu)")
    a = p.parse_args()
    synthesize(a.checkpoint, a.text, a.output,
               speaker_wav=a.speaker_wav,
               speaker_emb_path=a.speaker_emb,
               temperature=a.temperature, top_k=a.top_k,
               top_p=a.top_p, rep_penalty=a.rep_penalty,
               max_tokens=a.max_tokens, device=a.device)
if __name__ == "__main__":
main()
|