Natalie Buranich committed on
Commit a8fe7cc · 1 Parent(s): 13a4f7b

Create Dockerfile

Files changed (1)
  1. Dockerfile +266 -0
Dockerfile ADDED
@@ -0,0 +1,266 @@
#!/usr/bin/env python3

import argparse
import os
import sys
import tempfile
import time

import torch
import torchaudio

from tortoise.api import MODELS_DIR, TextToSpeech
from tortoise.utils.audio import get_voices, load_voices, load_audio
from tortoise.utils.text import split_and_recombine_text

parser = argparse.ArgumentParser(
    description='TorToiSe is a text-to-speech program that is capable of synthesizing speech '
                'in multiple voices with realistic prosody and intonation.')

parser.add_argument(
    'text', type=str, nargs='*',
    help='Text to speak. If omitted, text is read from stdin.')
parser.add_argument(
    '-v, --voice', type=str, default='random', metavar='VOICE', dest='voice',
    help='Selects the voice to use for generation. Use the & character to join two voices together. '
         'Use a comma to perform inference on multiple voices. Set to "all" to use all available voices. '
         'Note that multiple voices require the --output-dir option to be set.')
parser.add_argument(
    '-V, --voices-dir', metavar='VOICES_DIR', type=str, dest='voices_dir',
    help='Path to directory containing extra voices to be loaded. Use a comma to specify multiple directories.')
parser.add_argument(
    '-p, --preset', type=str, default='fast', choices=['ultra_fast', 'fast', 'standard', 'high_quality'], dest='preset',
    help='Which voice quality preset to use.')
parser.add_argument(
    '-q, --quiet', default=False, action='store_true', dest='quiet',
    help='Suppress all output.')

output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument(
    '-l, --list-voices', default=False, action='store_true', dest='list_voices',
    help='List available voices and exit.')
output_group.add_argument(
    '-P, --play', action='store_true', dest='play',
    help='Play the audio (requires pydub).')
output_group.add_argument(
    '-o, --output', type=str, metavar='OUTPUT', dest='output',
    help='Save the audio to a file.')
output_group.add_argument(
    '-O, --output-dir', type=str, metavar='OUTPUT_DIR', dest='output_dir',
    help='Save the audio to a directory as individual segments.')

multi_output_group = parser.add_argument_group('multi-output options (requires --output-dir)')
multi_output_group.add_argument(
    '--candidates', type=int, default=1,
    help='How many output candidates to produce per-voice. Note that only the first candidate is used in the combined output.')
multi_output_group.add_argument(
    '--regenerate', type=str, default=None,
    help='Comma-separated list of clip numbers to re-generate.')
multi_output_group.add_argument(
    '--skip-existing', action='store_true',
    help='Set to skip re-generating existing clips.')

advanced_group = parser.add_argument_group('advanced options')
advanced_group.add_argument(
    '--produce-debug-state', default=False, action='store_true',
    help='Whether or not to produce debug_states in current directory, which can aid in reproducing problems.')
advanced_group.add_argument(
    '--seed', type=int, default=None,
    help='Random seed which can be used to reproduce results.')
advanced_group.add_argument(
    '--models-dir', type=str, default=MODELS_DIR,
    help='Where to find pretrained model checkpoints. Tortoise automatically downloads these to '
         '~/.cache/tortoise/.models, so this should only be specified if you have custom checkpoints.')
advanced_group.add_argument(
    '--text-split', type=str, default=None,
    help='The size of the chunks to split the text into, in the format <desired_length>,<max_length>.')
advanced_group.add_argument(
    '--disable-redaction', default=False, action='store_true',
    help='Normally, text enclosed in brackets is automatically redacted from the spoken output '
         '(but is still rendered by the model), which can be used for prompt engineering. '
         'Set this flag to disable that behavior.')
advanced_group.add_argument(
    '--device', type=str, default=None,
    help='Device to use for inference.')
advanced_group.add_argument(
    '--batch-size', type=int, default=None,
    help='Batch size to use for inference. If omitted, the batch size is set based on available GPU memory.')

tuning_group = parser.add_argument_group('tuning options (overrides preset settings)')
tuning_group.add_argument(
    '--num-autoregressive-samples', type=int, default=None,
    help='Number of samples taken from the autoregressive model, all of which are filtered using CLVP. '
         'As TorToiSe is a probabilistic model, more samples means a higher probability of creating something "great".')
tuning_group.add_argument(
    '--temperature', type=float, default=None,
    help='The softmax temperature of the autoregressive model.')
tuning_group.add_argument(
    '--length-penalty', type=float, default=None,
    help='A length penalty applied to the autoregressive decoder. Higher settings cause the model to produce more terse outputs.')
tuning_group.add_argument(
    '--repetition-penalty', type=float, default=None,
    help='A penalty that prevents the autoregressive decoder from repeating itself during decoding. '
         'Can be used to reduce the incidence of long silences or "uhhhhhhs", etc.')
tuning_group.add_argument(
    '--top-p', type=float, default=None,
    help='P value used in nucleus sampling. 0 to 1. Lower values mean the decoder produces more "likely" (aka boring) outputs.')
tuning_group.add_argument(
    '--max-mel-tokens', type=int, default=None,
    help='Restricts the output length. 1 to 600. Each unit is 1/20 of a second.')
tuning_group.add_argument(
    '--cvvp-amount', type=float, default=None,
    help='How much the CVVP model should influence the output. '
         'Increasing this can in some cases reduce the likelihood of multiple speakers.')
tuning_group.add_argument(
    '--diffusion-iterations', type=int, default=None,
    help='Number of diffusion steps to perform. More steps means the network has more chances to iteratively '
         'refine the output, which should theoretically mean a higher quality output. '
         'Generally a value above 250 is not noticeably better, however.')
tuning_group.add_argument(
    '--cond-free', type=bool, default=None,
    help='Whether or not to perform conditioning-free diffusion. Conditioning-free diffusion performs two forward passes for '
         'each diffusion step: one with the outputs of the autoregressive model and one with no conditioning priors. The output '
         'of the two is blended according to the cond_free_k value below. Conditioning-free diffusion is the real deal, and '
         'dramatically improves realism.')
tuning_group.add_argument(
    '--cond-free-k', type=float, default=None,
    help='Knob that determines how to balance the conditioning-free signal with the conditioning-present signal. [0,inf]. '
         'As cond_free_k increases, the output becomes dominated by the conditioning-free signal. '
         'Formula is: output=cond_present_output*(cond_free_k+1)-cond_absent_output*cond_free_k')
tuning_group.add_argument(
    '--diffusion-temperature', type=float, default=None,
    help='Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0 '
         'are the "mean" prediction of the diffusion network and will sound bland and smeared.')

usage_examples = f'''
Examples:

Read text using random voice and place it in a file:

    {parser.prog} -o hello.wav "Hello, how are you?"

Read text from stdin and play it using the tom voice:

    echo "Say it like you mean it!" | {parser.prog} -P -v tom

Read a text file using multiple voices and save the audio clips to a directory:

    {parser.prog} -O /tmp/tts-results -v tom,emma <textfile.txt
'''

try:
    args = parser.parse_args()
except SystemExit as e:
    if e.code == 0:
        print(usage_examples)
    sys.exit(e.code)

extra_voice_dirs = args.voices_dir.split(',') if args.voices_dir else []
all_voices = sorted(get_voices(extra_voice_dirs))

if args.list_voices:
    for v in all_voices:
        print(v)
    sys.exit(0)

selected_voices = all_voices if args.voice == 'all' else args.voice.split(',')
selected_voices = [v.split('&') if '&' in v else [v] for v in selected_voices]
for voices in selected_voices:
    for v in voices:
        if v != 'random' and v not in all_voices:
            parser.error(f'voice {v} not available, use --list-voices to see available voices.')

if len(args.text) == 0:
    text = ''
    for line in sys.stdin:
        text += line
else:
    text = ' '.join(args.text)
text = text.strip()
if args.text_split:
    desired_length, max_length = [int(x) for x in args.text_split.split(',')]
    if desired_length > max_length:
        parser.error(f'--text-split: desired_length ({desired_length}) must be <= max_length ({max_length})')
    texts = split_and_recombine_text(text, desired_length, max_length)
else:
    texts = split_and_recombine_text(text)
if len(texts) == 0:
    parser.error('no text provided')

if args.output_dir:
    os.makedirs(args.output_dir, exist_ok=True)
else:
    if len(selected_voices) > 1:
        parser.error('cannot have multiple voices without --output-dir')
    if args.candidates > 1:
        parser.error('cannot have multiple candidates without --output-dir')

# error out early if pydub isn't installed
if args.play:
    try:
        import pydub
        import pydub.playback
    except ImportError:
        parser.error('--play requires pydub to be installed, which can be done with "pip install pydub"')

seed = int(time.time()) if args.seed is None else args.seed
if not args.quiet:
    print('Loading tts...')
tts = TextToSpeech(models_dir=args.models_dir, enable_redaction=not args.disable_redaction,
                   device=args.device, autoregressive_batch_size=args.batch_size)
gen_settings = {
    'use_deterministic_seed': seed,
    'verbose': not args.quiet,
    'k': args.candidates,
    'preset': args.preset,
}
tuning_options = [
    'num_autoregressive_samples', 'temperature', 'length_penalty', 'repetition_penalty', 'top_p',
    'max_mel_tokens', 'cvvp_amount', 'diffusion_iterations', 'cond_free', 'cond_free_k', 'diffusion_temperature']
for option in tuning_options:
    if getattr(args, option) is not None:
        gen_settings[option] = getattr(args, option)
total_clips = len(texts) * len(selected_voices)
regenerate_clips = [int(x) for x in args.regenerate.split(',')] if args.regenerate else None
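# Main generation loop: for each selected voice (or &-joined voice combination), render every
# text chunk in order, optionally skipping or re-generating clips that already exist on disk,
# then stitch the per-chunk clips into one combined waveform per voice.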
for voice_idx, voice in enumerate(selected_voices):
    audio_parts = []
    voice_samples, conditioning_latents = load_voices(voice, extra_voice_dirs)
    for text_idx, text in enumerate(texts):
        clip_name = f'{"-".join(voice)}_{text_idx:02d}'
        if args.output_dir:
            first_clip = os.path.join(args.output_dir, f'{clip_name}_00.wav')
            if (args.skip_existing or (regenerate_clips and text_idx not in regenerate_clips)) and os.path.exists(first_clip):
                audio_parts.append(load_audio(first_clip, 24000))
                if not args.quiet:
                    print(f'Skipping {clip_name}')
                continue
        if not args.quiet:
            print(f'Rendering {clip_name} ({(voice_idx * len(texts) + text_idx + 1)} of {total_clips})...')
            print(' ' + text)
        gen = tts.tts_with_preset(
            text, voice_samples=voice_samples, conditioning_latents=conditioning_latents, **gen_settings)
        gen = gen if args.candidates > 1 else [gen]
        for candidate_idx, audio in enumerate(gen):
            audio = audio.squeeze(0).cpu()
            if candidate_idx == 0:
                audio_parts.append(audio)
            if args.output_dir:
                filename = f'{clip_name}_{candidate_idx:02d}.wav'
                torchaudio.save(os.path.join(args.output_dir, filename), audio, 24000)

    audio = torch.cat(audio_parts, dim=-1)
    if args.output_dir:
        filename = f'{"-".join(voice)}_combined.wav'
        torchaudio.save(os.path.join(args.output_dir, filename), audio, 24000)
    elif args.output:
        torchaudio.save(args.output, audio, 24000)
    elif args.play:
        f = tempfile.NamedTemporaryFile(suffix='.wav', delete=True)
        torchaudio.save(f.name, audio, 24000)
        pydub.playback.play(pydub.AudioSegment.from_wav(f.name))

    if args.produce_debug_state:
        os.makedirs('debug_states', exist_ok=True)
        dbg_state = (seed, texts, voice_samples, conditioning_latents, args)
        torch.save(dbg_state, os.path.join('debug_states', f'debug_{"-".join(voice)}.pth'))
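
For reference, the script above is a thin CLI wrapper around the tortoise Python API it imports (TextToSpeech, load_voices, tts_with_preset). A minimal sketch of the same generate-and-save flow, assuming the tortoise-tts package is installed; the 'tom' voice and output path are illustrative placeholders, not part of the commit:

# Minimal sketch of the generate-and-save flow without the CLI wrapper.
# Assumes tortoise-tts is installed; 'tom' and 'hello.wav' are illustrative.
import torchaudio

from tortoise.api import TextToSpeech
from tortoise.utils.audio import load_voices

tts = TextToSpeech()  # downloads checkpoints to ~/.cache/tortoise/.models on first use
voice_samples, conditioning_latents = load_voices(['tom'])

# Single-candidate generation returns one waveform tensor, saved at 24 kHz as above
audio = tts.tts_with_preset(
    'Hello, how are you?',
    voice_samples=voice_samples,
    conditioning_latents=conditioning_latents,
    preset='fast')

torchaudio.save('hello.wav', audio.squeeze(0).cpu(), 24000)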