mah92 committed · verified
Commit fea5b14 · 1 Parent(s): 1a157f2

Upload cli.py

Files changed (1):
  1. cli.py +438 -0
cli.py ADDED
import argparse
import datetime as dt
import os
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
import torch

from matcha.hifigan.config import v1
from matcha.hifigan.denoiser import Denoiser
from matcha.hifigan.env import AttrDict
from matcha.hifigan.models import Generator as HiFiGAN
from matcha.models.matcha_tts import MatchaTTS
from matcha.text import sequence_to_text, text_to_sequence
from matcha.utils.utils import assert_model_downloaded, get_user_data_dir, intersperse

MATCHA_URLS = {
    "matcha_ljspeech": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_ljspeech.ckpt",
    "matcha_vctk": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_vctk.ckpt",
}

VOCODER_URLS = {
    "hifigan_T2_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/generator_v1",  # Old url: https://drive.google.com/file/d/14NENd4equCBLyyCSke114Mv6YR_j_uFs/view?usp=drive_link
    "hifigan_univ_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/g_02500000",  # Old url: https://drive.google.com/file/d/1qpgI41wNXFcH-iKq1Y42JlBC9j0je8PW/view?usp=drive_link
}

MULTISPEAKER_MODEL = {
    "matcha_vctk": {"vocoder": "hifigan_univ_v1", "speaking_rate": 0.85, "spk": 0, "spk_range": (0, 107)}
}

SINGLESPEAKER_MODEL = {"matcha_ljspeech": {"vocoder": "hifigan_T2_v1", "speaking_rate": 0.95, "spk": None}}

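# Pretrained checkpoints are fetched lazily: assert_model_downloaded() downloads the file
# from the URL above into get_user_data_dir() if it is not already cached there.
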
def plot_spectrogram_to_numpy(spectrogram, filename):
    fig, ax = plt.subplots(figsize=(12, 3))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.title("Synthesised Mel-Spectrogram")
    fig.canvas.draw()
    plt.savefig(filename)
    plt.close(fig)  # Close the figure so repeated calls do not accumulate open figures

def process_text(i: int, text: str, device: torch.device):
    print(f"[{i}] - Input text: {text}")
    x = torch.tensor(
        intersperse(text_to_sequence(text, ["persian_cleaners_piper"])[0], 0),
        dtype=torch.long,
        device=device,
    )[None]
    x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device)
    x_phones = sequence_to_text(x.squeeze(0).tolist())
    print(f"[{i}] - Phonetised text: {x_phones[1::2]}")

    return {"x_orig": text, "x": x, "x_lengths": x_lengths, "x_phones": x_phones}

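# Note: intersperse() (from matcha.utils.utils) interleaves a blank token id (0) between
# the symbol ids, e.g. [5, 12, 7] -> [0, 5, 0, 12, 0, 7, 0], which is why the slice
# x_phones[1::2] above recovers the human-readable phone sequence.
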
def get_texts(args):
    if args.text:
        texts = [args.text]
    else:
        with open(args.file, encoding="utf-8") as f:
            texts = f.readlines()
    return texts

def assert_required_models_available(args):
    save_dir = get_user_data_dir()
    if getattr(args, "checkpoint_path", None) is not None:
        # A custom checkpoint path was supplied; use it directly
        model_path = args.checkpoint_path
    else:
        model_path = save_dir / f"{args.model}.ckpt"
        assert_model_downloaded(model_path, MATCHA_URLS[args.model])

    # If the vocoder is not one of the predefined ones, treat it as a custom path
    if args.vocoder not in VOCODER_URLS:
        vocoder_path = Path(args.vocoder)  # Treat the vocoder argument as a direct path
    else:
        vocoder_path = save_dir / f"{args.vocoder}"
        assert_model_downloaded(vocoder_path, VOCODER_URLS[args.vocoder])

    return {"matcha": model_path, "vocoder": vocoder_path}

def load_hifigan(checkpoint_path, device):
    h = AttrDict(v1)
    hifigan = HiFiGAN(h).to(device)
    hifigan.load_state_dict(torch.load(checkpoint_path, map_location=device)["generator"])
    _ = hifigan.eval()
    hifigan.remove_weight_norm()
    return hifigan

def load_vocoder(vocoder_name, checkpoint_path, device):
    print(f"[!] Loading {vocoder_name}!")

    # If the vocoder name is not predefined, treat it as a custom checkpoint
    if vocoder_name not in VOCODER_URLS:
        print(f"[!] Loading custom vocoder from {checkpoint_path}")
        try:
            vocoder = load_hifigan(checkpoint_path, device)
            print(f"[+] Custom vocoder {vocoder_name} loaded from {checkpoint_path}!")
        except Exception as e:
            raise NotImplementedError(
                f"Vocoder {vocoder_name} could not be loaded from {checkpoint_path}. Error: {e}"
            ) from e
    else:
        # Load one of the predefined vocoders
        vocoder = load_hifigan(checkpoint_path, device)

    denoiser = Denoiser(vocoder, mode="zeros")
    print(f"[+] {vocoder_name} loaded!")
    return vocoder, denoiser

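# Note: Denoiser (from matcha.hifigan.denoiser) estimates the vocoder's bias signal --
# roughly, the audio HiFi-GAN produces for an all-zeros mel input ("zeros" mode) -- and
# subtracts a scaled version of its spectrum from the output to reduce vocoder noise.
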
def load_matcha(model_name, checkpoint_path, device):
    print(f"[!] Loading {model_name}!")
    model = MatchaTTS.load_from_checkpoint(checkpoint_path, map_location=device)
    _ = model.eval()

    print(f"[+] {model_name} loaded!")
    return model

def to_waveform(mel, vocoder, denoiser=None, denoiser_strength=0.00025):
    audio = vocoder(mel).clamp(-1, 1)
    if denoiser is not None:
        audio = denoiser(audio.squeeze(), strength=denoiser_strength)

    return audio.cpu().squeeze()

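# A higher denoiser_strength removes more of the vocoder's bias noise but can start to
# dull the speech; the 0.00025 default is a light touch.
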
def save_to_folder(filename: str, output: dict, folder: str, sample_rate: int):
    folder = Path(folder)
    folder.mkdir(exist_ok=True, parents=True)
    plot_spectrogram_to_numpy(np.array(output["mel"].squeeze().float().cpu()), folder / f"{filename}.png")
    np.save(folder / f"{filename}", output["mel"].cpu().numpy())
    sf.write(folder / f"{filename}.wav", output["waveform"], sample_rate, "PCM_24")
    return folder.resolve() / f"{filename}.wav"

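# Each utterance therefore produces three artefacts in the output folder:
# <name>.png (spectrogram plot), <name>.npy (np.save appends the extension) and <name>.wav.
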
def validate_args(args):
    assert (
        args.text or args.file
    ), "Either text or file must be provided. Matcha-T(ea)TTS needs some text to whisk the waveforms."
    assert args.temperature >= 0, "Sampling temperature cannot be negative"
    assert args.steps > 0, "Number of ODE steps must be greater than 0"

    if args.checkpoint_path is None:
        # When using pretrained models
        if args.model in SINGLESPEAKER_MODEL:
            args = validate_args_for_single_speaker_model(args)

        if args.model in MULTISPEAKER_MODEL:
            args = validate_args_for_multispeaker_model(args)
    else:
        # When using a custom model
        if args.vocoder != "hifigan_univ_v1":
            warn_ = "[-] Using a custom model checkpoint! I would suggest passing --vocoder hifigan_univ_v1, unless the custom model is trained on LJ Speech."
            warnings.warn(warn_, UserWarning)
        if args.speaking_rate is None:
            args.speaking_rate = 1.0

    if args.batched:
        assert args.batch_size > 0, "Batch size must be greater than 0"
    assert args.speaking_rate > 0, "Speaking rate must be greater than 0"

    return args

def validate_args_for_multispeaker_model(args):
    if args.vocoder is not None:
        if args.vocoder != MULTISPEAKER_MODEL[args.model]["vocoder"]:
            warn_ = f"[-] Using {args.model} model! I would suggest passing --vocoder {MULTISPEAKER_MODEL[args.model]['vocoder']}"
            warnings.warn(warn_, UserWarning)
    else:
        args.vocoder = MULTISPEAKER_MODEL[args.model]["vocoder"]

    if args.speaking_rate is None:
        args.speaking_rate = MULTISPEAKER_MODEL[args.model]["speaking_rate"]

    spk_range = MULTISPEAKER_MODEL[args.model]["spk_range"]
    if args.spk is not None:
        assert (
            spk_range[0] <= args.spk <= spk_range[-1]
        ), f"Speaker ID must be between {spk_range[0]} and {spk_range[-1]} for this model."
    else:
        available_spk_id = MULTISPEAKER_MODEL[args.model]["spk"]
        warn_ = f"[!] Speaker ID not provided! Using speaker ID {available_spk_id}"
        warnings.warn(warn_, UserWarning)
        args.spk = available_spk_id

    return args

def validate_args_for_single_speaker_model(args):
    if args.vocoder is not None:
        if args.vocoder != SINGLESPEAKER_MODEL[args.model]["vocoder"]:
            warn_ = f"[-] Using {args.model} model! I would suggest passing --vocoder {SINGLESPEAKER_MODEL[args.model]['vocoder']}"
            warnings.warn(warn_, UserWarning)
    else:
        args.vocoder = SINGLESPEAKER_MODEL[args.model]["vocoder"]

    if args.speaking_rate is None:
        args.speaking_rate = SINGLESPEAKER_MODEL[args.model]["speaking_rate"]

    if args.spk != SINGLESPEAKER_MODEL[args.model]["spk"]:
        warn_ = f"[-] Ignoring speaker id {args.spk} for {args.model}"
        warnings.warn(warn_, UserWarning)
        args.spk = SINGLESPEAKER_MODEL[args.model]["spk"]

    return args

@torch.inference_mode()
def cli():
    parser = argparse.ArgumentParser(
        description=" 🍡 Matcha-TTS: A fast TTS architecture with conditional flow matching"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="matcha_ljspeech",
        help="Model to use",
        choices=MATCHA_URLS.keys(),
    )

    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default=None,
        help="Path to the custom model checkpoint",
    )

    parser.add_argument(
        "--vocoder",
        type=str,
        default=None,
        help="Vocoder to use (default: will use the one suggested with the pretrained model)",
        # No 'choices' here, so any vocoder name or custom checkpoint path is accepted
    )
    parser.add_argument("--text", type=str, default=None, help="Text to synthesize")
    parser.add_argument("--file", type=str, default=None, help="Text file to synthesize")
    parser.add_argument("--spk", type=int, default=None, help="Speaker ID")
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.667,
        help="Variance of the x0 noise (default: 0.667)",
    )
    parser.add_argument(
        "--speaking_rate",
        type=float,
        default=None,
        help="Change the speaking rate; a higher value means a slower speaking rate (default: 1.0)",
    )
    parser.add_argument("--steps", type=int, default=10, help="Number of ODE steps (default: 10)")
    parser.add_argument("--cpu", action="store_true", help="Use CPU for inference (default: use GPU if available)")
    parser.add_argument(
        "--denoiser_strength",
        type=float,
        default=0.00025,
        help="Strength of the vocoder bias denoiser (default: 0.00025)",
    )
    parser.add_argument(
        "--output_folder",
        type=str,
        default=os.getcwd(),
        help="Output folder to save results (default: current dir)",
    )
    parser.add_argument("--batched", action="store_true", help="Batched inference (default: False)")
    parser.add_argument(
        "--batch_size", type=int, default=32, help="Batch size; only used with --batched (default: 32)"
    )
    parser.add_argument(
        "--sample_rate",
        type=int,
        default=22050,
        help="Sample rate of the output audio (default: 22050)",
    )

    args = parser.parse_args()

    args = validate_args(args)
    device = get_device(args)
    print_config(args)
    paths = assert_required_models_available(args)

    if args.checkpoint_path is not None:
        print(f"[🍡] Loading custom model from {args.checkpoint_path}")
        paths["matcha"] = args.checkpoint_path
        args.model = "custom_model"

    model = load_matcha(args.model, paths["matcha"], device)
    vocoder, denoiser = load_vocoder(args.vocoder, paths["vocoder"], device)

    texts = get_texts(args)

    spk = torch.tensor([args.spk], device=device, dtype=torch.long) if args.spk is not None else None
    if len(texts) == 1 or not args.batched:
        unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk)
    else:
        batched_synthesis(args, device, model, vocoder, denoiser, texts, spk)

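# Example invocations (hypothetical paths and text, assuming the matcha package and its
# dependencies are installed):
#   python cli.py --text "Hello there"
#   python cli.py --file sentences.txt --batched --batch_size 16
#   python cli.py --checkpoint_path /path/to/custom.ckpt --vocoder hifigan_univ_v1 --spk 3 --text "Hi"
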
class BatchedSynthesisDataset(torch.utils.data.Dataset):
    """Thin Dataset wrapper so a DataLoader can batch the pre-processed texts."""

    def __init__(self, processed_texts):
        self.processed_texts = processed_texts

    def __len__(self):
        return len(self.processed_texts)

    def __getitem__(self, idx):
        return self.processed_texts[idx]

def batched_collate_fn(batch):
    x = []
    x_lengths = []

    for b in batch:
        x.append(b["x"].squeeze(0))
        x_lengths.append(b["x_lengths"])

    x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True)
    x_lengths = torch.concat(x_lengths, dim=0)
    return {"x": x, "x_lengths": x_lengths}

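# pad_sequence zero-pads every sequence in the batch up to the longest one, producing a
# [batch, max_len] tensor; the true lengths are carried separately in x_lengths.
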
def batched_synthesis(args, device, model, vocoder, denoiser, texts, spk):
    total_rtf = []
    total_rtf_w = []
    processed_text = [process_text(i, text, "cpu") for i, text in enumerate(texts)]
    dataloader = torch.utils.data.DataLoader(
        BatchedSynthesisDataset(processed_text),
        batch_size=args.batch_size,
        collate_fn=batched_collate_fn,
        num_workers=8,
    )
    for i, batch in enumerate(dataloader):
        i = i + 1
        start_t = dt.datetime.now()
        b = batch["x"].shape[0]
        output = model.synthesise(
            batch["x"].to(device),
            batch["x_lengths"].to(device),
            n_timesteps=args.steps,
            temperature=args.temperature,
            spks=spk.expand(b) if spk is not None else spk,
            length_scale=args.speaking_rate,
        )

        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser, args.denoiser_strength)
        t = (dt.datetime.now() - start_t).total_seconds()
        rtf_w = t * args.sample_rate / (output["waveform"].shape[-1])
        print(f"[🍡-Batch: {i}] Matcha-TTS RTF: {output['rtf']:.4f}")
        print(f"[🍡-Batch: {i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}")
        total_rtf.append(output["rtf"])
        total_rtf_w.append(rtf_w)
        for j in range(output["mel"].shape[0]):
            # Index utterances globally so files from later batches do not overwrite earlier ones
            utt_idx = (i - 1) * args.batch_size + j
            base_name = (
                f"utterance_{utt_idx:03d}_speaker_{args.spk:03d}"
                if args.spk is not None
                else f"utterance_{utt_idx:03d}"
            )
            length = output["mel_lengths"][j]
            # 256 is the vocoder hop length, so mel frames map to waveform samples by a factor of 256
            new_dict = {"mel": output["mel"][j][:, :length], "waveform": output["waveform"][j][: length * 256]}
            location = save_to_folder(base_name, new_dict, args.output_folder, args.sample_rate)
            print(f"[🍡-{utt_idx}] Waveform saved: {location}")

    print("".join(["="] * 100))
    print(f"[🍡] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf):.4f}")
    print(f"[🍡] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w):.4f}")
    print("[🍡] Enjoy the freshly whisked 🍡 Matcha-TTS!")

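# RTF (real-time factor) = time spent synthesising / duration of the audio produced;
# values below 1.0 mean the pipeline runs faster than real time.
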
def unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk):
    total_rtf = []
    total_rtf_w = []
    for i, text in enumerate(texts):
        i = i + 1
        base_name = f"utterance_{i:03d}_speaker_{args.spk:03d}" if args.spk is not None else f"utterance_{i:03d}"

        print("".join(["="] * 100))
        text = text.strip()
        text_processed = process_text(i, text, device)

        print(f"[🍡] Whisking Matcha-T(ea)TTS for: {i}")
        start_t = dt.datetime.now()
        output = model.synthesise(
            text_processed["x"],
            text_processed["x_lengths"],
            n_timesteps=args.steps,
            temperature=args.temperature,
            spks=spk,
            length_scale=args.speaking_rate,
        )
        output["waveform"] = to_waveform(output["mel"], vocoder, denoiser, args.denoiser_strength)
        # RTF with HiFiGAN
        t = (dt.datetime.now() - start_t).total_seconds()
        rtf_w = t * args.sample_rate / (output["waveform"].shape[-1])
        print(f"[🍡-{i}] Matcha-TTS RTF: {output['rtf']:.4f}")
        print(f"[🍡-{i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}")
        total_rtf.append(output["rtf"])
        total_rtf_w.append(rtf_w)

        location = save_to_folder(base_name, output, args.output_folder, args.sample_rate)
        print(f"[+] Waveform saved: {location}")

    print("".join(["="] * 100))
    print(f"[🍡] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf):.4f}")
    print(f"[🍡] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w):.4f}")
    print("[🍡] Enjoy the freshly whisked 🍡 Matcha-TTS!")

def print_config(args):
    print("[!] Configurations: ")
    print(f"\t- Model: {args.model}")
    print(f"\t- Vocoder: {args.vocoder}")
    print(f"\t- Temperature: {args.temperature}")
    print(f"\t- Speaking rate: {args.speaking_rate}")
    print(f"\t- Number of ODE steps: {args.steps}")
    print(f"\t- Speaker: {args.spk}")
    print(f"\t- Sample rate: {args.sample_rate}")

def get_device(args):
    if torch.cuda.is_available() and not args.cpu:
        print("[+] GPU Available! Using GPU")
        device = torch.device("cuda")
    else:
        print("[-] GPU not available or forced CPU run! Using CPU")
        device = torch.device("cpu")
    return device


if __name__ == "__main__":
    cli()