beleata74 committed on
Commit 6b1f961 · verified · 1 Parent(s): 7432b55

Upload inference.py with huggingface_hub

Files changed (1)
inference.py +241 -0
inference.py ADDED
@@ -0,0 +1,241 @@
"""
V6 Inference — encoder-decoder TTS with MioCodec + speaker cloning
===================================================================
1. Encode text with encoder (bidirectional, once)
2. Autoregressively decode audio tokens with decoder + speaker embedding
3. Decode tokens with MioCodec using global_embedding
"""
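
# How the pipeline above maps onto this file: step 1 is model.encode()
# inside generate(), step 2 is the sampling loop in generate(), and step 3
# is codec.tokens_to_wav() in synthesize().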

import argparse
import re
import time

import torch

from config import (
    AUDIO_OFFSET, NUM_AUDIO_TOKENS, END_OF_SPEECH_TOKEN_ID,
    START_OF_SPEECH_TOKEN_ID, CODEC_SAMPLE_RATE, CODEC_FRAME_RATE,
)
from tokenizer import TTSTokenizer
from codec import CodecV6
from model import load_for_inference

def _split_text(text, tokenizer, max_len=250):
    """Split text into chunks that fit within encoder max_text_len."""
    sentences = re.split(r'(?<=[.!?;:,])\s+', text)
    chunks = []
    current = ""
    for sent in sentences:
        candidate = (current + " " + sent).strip() if current else sent
        enc_len = len(tokenizer.build_encoder_input(candidate))
        if enc_len <= max_len:
            current = candidate
        else:
            if current:
                chunks.append(current)
            # If a single sentence is too long on its own, split it by words
            if len(tokenizer.build_encoder_input(sent)) > max_len:
                words = sent.split()
                current = ""
                for w in words:
                    cand = (current + " " + w).strip() if current else w
                    if len(tokenizer.build_encoder_input(cand)) <= max_len:
                        current = cand
                    else:
                        if current:
                            chunks.append(current)
                        current = w
            else:
                current = sent
    if current:
        chunks.append(current)
    return chunks
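
# A quick sanity check of the chunker, using a hypothetical stub in place of
# the real TTSTokenizer (its build_encoder_input here just returns the word
# list, so "length" means word count):
#
#   class _StubTok:
#       def build_encoder_input(self, s):
#           return s.split()
#
#   _split_text("One two three. Four five six. Seven.", _StubTok(), max_len=3)
#   # -> ["One two three.", "Four five six.", "Seven."]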


@torch.no_grad()
def generate(model, tokenizer, text, speaker_emb,
             max_new_tokens=512, temperature=0.7, top_k=250,
             top_p=0.95, rep_penalty=1.1, device="cuda"):
    """
    Generate audio tokens from text.

    Args:
        model: TTSEncoderDecoder
        tokenizer: TTSTokenizer
        text: input text string
        speaker_emb: [128] MioCodec global_embedding
        max_new_tokens: max decoder steps
        temperature: sampling temperature
        top_k: top-k filtering
        top_p: nucleus sampling threshold
        rep_penalty: repetition penalty on recent tokens
        device: cuda/cpu

    Returns:
        torch.Tensor of MioCodec codes [num_frames], or None
    """
    # 1. Encode text (one shot, bidirectional)
    enc_ids = tokenizer.build_encoder_input(text).unsqueeze(0).to(device)
    enc_mask = torch.ones_like(enc_ids)

    enc_out = model.encode(enc_ids, enc_mask)  # [1, T_enc, d_model]

    # 2. Prepare speaker embedding
    spk = speaker_emb.unsqueeze(0).to(device)  # [1, 128]

    # 3. Start decoder with <sos>
    dec_ids = torch.tensor([[START_OF_SPEECH_TOKEN_ID]], device=device)
    past = None
    generated_tokens = []

    for step in range(max_new_tokens):
        inp = dec_ids[:, -1:] if past is not None else dec_ids

        # With a KV-cache only the newest token is processed each step, so
        # the speaker embedding must be passed on every call; the model adds
        # it to the fresh token embeddings internally.
        dec_out = model.decoder(
            input_ids=inp,
            encoder_output=enc_out,
            encoder_mask=enc_mask,
            speaker_emb=spk,
            past_key_values=past,
            use_cache=True,
        )
        past = dec_out["past_key_values"]
        logits = dec_out["logits"][:, -1, :]

        # Mask: only allow audio tokens + end_of_speech
        mask = torch.full_like(logits, float("-inf"))
        mask[:, AUDIO_OFFSET:AUDIO_OFFSET + NUM_AUDIO_TOKENS] = 0
        mask[:, END_OF_SPEECH_TOKEN_ID] = 0
        logits = logits + mask

        # Repetition penalty on recent tokens (sign-aware: dividing a
        # negative logit by a penalty > 1 would make that token *more*
        # likely, so negative logits are multiplied instead)
        if rep_penalty != 1.0 and generated_tokens:
            recent = set(generated_tokens[-100:])
            for tid in recent:
                if AUDIO_OFFSET <= tid < AUDIO_OFFSET + NUM_AUDIO_TOKENS:
                    if logits[0, tid] > 0:
                        logits[0, tid] /= rep_penalty
                    else:
                        logits[0, tid] *= rep_penalty

        logits = logits / temperature

        # Top-k
        if top_k > 0:
            kth = torch.topk(logits, min(top_k, logits.shape[-1])).values[:, -1:]
            logits[logits < kth] = float("-inf")

        # Top-p (nucleus)
        if top_p < 1.0:
            sorted_l, sorted_i = torch.sort(logits, descending=True)
            cum = torch.cumsum(torch.softmax(sorted_l, -1), -1)
            remove = cum > top_p
            remove[:, 1:] = remove[:, :-1].clone()
            remove[:, 0] = False
            logits[remove.scatter(1, sorted_i, remove)] = float("-inf")
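        # Worked example of the nucleus step above (numbers illustrative):
        # with sorted probs [0.5, 0.3, 0.15, 0.05] and top_p=0.9 the
        # cumulative sums are [0.5, 0.8, 0.95, 1.0]; after the one-position
        # shift the mask is [F, F, F, T], so the three most likely tokens
        # survive and the fourth is set to -inf before sampling.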

        next_tok = torch.multinomial(torch.softmax(logits, -1), 1)
        tok_id = next_tok.item()

        if tok_id == END_OF_SPEECH_TOKEN_ID:
            break

        generated_tokens.append(tok_id)
        dec_ids = torch.cat([dec_ids, next_tok], dim=-1)

    if not generated_tokens:
        return None

    result = torch.tensor(generated_tokens, dtype=torch.long)
    audio_mask = (result >= AUDIO_OFFSET) & (result < AUDIO_OFFSET + NUM_AUDIO_TOKENS)
    return result[audio_mask] - AUDIO_OFFSET
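
# Calling generate() directly (a sketch; the checkpoint and reference-audio
# paths are placeholders):
#
#   model = load_for_inference("checkpoints/v6.pt", device="cuda")
#   spk = CodecV6(device="cuda").encode("ref.wav")["global_embedding"]
#   codes = generate(model, TTSTokenizer(), "Hello there.", spk)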


def synthesize(checkpoint, text, output="output.wav",
               speaker_wav=None, speaker_emb_path=None,
               temperature=0.7, top_k=250, top_p=0.95,
               rep_penalty=1.1, max_tokens=512, device="cuda"):
    """
    Full TTS pipeline: text → audio file.

    The speaker can be provided as either:
    1. speaker_wav: path to reference audio (encoded with MioCodec)
    2. speaker_emb_path: path to a saved .pt or .npy embedding
    """
    print(f"'{text[:80]}' | T={temperature}")
    model = load_for_inference(checkpoint, device=device)
    tokenizer = TTSTokenizer()
    codec = CodecV6(device=device)

    # Get speaker embedding
    if speaker_emb_path:
        import numpy as np
        if speaker_emb_path.endswith('.npy'):
            speaker_emb = torch.from_numpy(np.load(speaker_emb_path)).to(device)
        else:
            speaker_emb = torch.load(speaker_emb_path, map_location=device, weights_only=False)
            if isinstance(speaker_emb, dict):
                speaker_emb = speaker_emb.get("global_embedding",
                                              speaker_emb.get("embedding"))
        if speaker_emb.dim() > 1:
            speaker_emb = speaker_emb.squeeze()
        print(f"Speaker from preset: {speaker_emb.shape}")
    elif speaker_wav:
        result = codec.encode(speaker_wav)
        speaker_emb = result['global_embedding'].to(device)
        print(f"Speaker from wav: {speaker_wav}")
    else:
        raise ValueError("Provide speaker_wav or speaker_emb_path")

    # Split long text into chunks that fit encoder max_text_len
    chunks = _split_text(text, tokenizer, max_len=250)
    print(f"Text split into {len(chunks)} chunk(s)")

    t0 = time.time()
    all_codes = []
    for i, chunk in enumerate(chunks):
        enc_len = len(tokenizer.build_encoder_input(chunk))
        print(f" [{i+1}/{len(chunks)}] {enc_len} enc tokens: '{chunk[:60]}...'")
        codes = generate(model, tokenizer, chunk, speaker_emb, max_tokens,
                         temperature, top_k, top_p, rep_penalty, device)
        if codes is not None and len(codes) > 0:
            all_codes.append(codes)
    gen_time = time.time() - t0

    if not all_codes:
        print("No audio generated!")
        return

    codes = torch.cat(all_codes)
    audio_dur = len(codes) / CODEC_FRAME_RATE
    rtf = gen_time / audio_dur if audio_dur > 0 else float('inf')
    print(f"{len(codes)} tokens ({audio_dur:.1f}s audio, {gen_time:.2f}s gen, RTF={rtf:.3f})")

    # Decode to wav
    wav = codec.tokens_to_wav(codes, speaker_emb, output)
    print(f"Saved: {output} ({len(wav)/CODEC_SAMPLE_RATE:.2f}s)")
    return wav
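
# Using synthesize() from another script or a notebook (a sketch; the paths
# are placeholders):
#
#   synthesize("checkpoints/v6.pt", "Text to speak.",
#              output="demo.wav", speaker_wav="reference.wav")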


def main():
    p = argparse.ArgumentParser(description="V6 TTS Inference")
    p.add_argument("--checkpoint", required=True)
    p.add_argument("--text", required=True)
    p.add_argument("--output", default="output.wav")
    p.add_argument("--speaker-wav", help="Reference audio for voice cloning")
    p.add_argument("--speaker-emb", help="Path to saved speaker embedding (.pt or .npy)")
    p.add_argument("--temperature", type=float, default=0.7)
    p.add_argument("--top-k", type=int, default=250)
    p.add_argument("--top-p", type=float, default=0.95)
    p.add_argument("--rep-penalty", type=float, default=1.1)
    p.add_argument("--max-tokens", type=int, default=512)
    a = p.parse_args()
    synthesize(a.checkpoint, a.text, a.output,
               speaker_wav=a.speaker_wav,
               speaker_emb_path=a.speaker_emb,
               temperature=a.temperature, top_k=a.top_k,
               top_p=a.top_p, rep_penalty=a.rep_penalty,
               max_tokens=a.max_tokens)


if __name__ == "__main__":
    main()
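
A typical command-line invocation (checkpoint and reference-audio paths are
placeholders):

    python inference.py --checkpoint checkpoints/v6.pt \
        --text "Hello from V6." --speaker-wav speaker_ref.wav --output hello.wav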