Upload train_vits.py with huggingface_hub
Browse files- train_vits.py +102 -0
train_vits.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from trainer import Trainer, TrainerArgs
|
| 4 |
+
|
| 5 |
+
from TTS.tts.configs.shared_configs import BaseDatasetConfig, CharactersConfig
|
| 6 |
+
from TTS.tts.configs.vits_config import VitsConfig
|
| 7 |
+
from TTS.tts.datasets import load_tts_samples
|
| 8 |
+
from TTS.tts.models.vits import Vits, VitsAudioConfig
|
| 9 |
+
from TTS.tts.utils.text.tokenizer import TTSTokenizer
|
| 10 |
+
from TTS.utils.audio import AudioProcessor
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
if __name__ == "__main__":
    # Fine-tune a Coqui-TTS VITS model on a single-speaker Farsi (Persian)
    # dataset laid out like LJSpeech (wavs/ + metadata.csv), restoring from
    # the best checkpoint of a previous run.
    output_path = os.path.dirname(os.path.abspath(__file__))

    # NOTE(fix): the original wrapped this absolute path in
    # os.path.join(output_path, ...). os.path.join discards every preceding
    # component when a later one is absolute, so output_path was silently
    # ignored — the join was a misleading no-op. Use the real path directly.
    dataset_config = BaseDatasetConfig(
        formatter="ljspeech",
        meta_file_train="metadata.csv",
        path="/home/fatima-w/workpj/texttospeech/yal_asheh1_book",
    )

    audio_config = VitsAudioConfig(
        sample_rate=22050,
        win_length=1024,
        hop_length=256,
        num_mels=80,
        mel_fmin=0,
        mel_fmax=None,  # None lets the library use the Nyquist default
    )

    # Character-level vocabulary: Persian/Arabic letters, diacritics, Persian
    # digits, plus ASCII letters. Phonemes are disabled (phonemes=None), so
    # the model trains directly on these characters.
    character_config = CharactersConfig(
        characters="ىءۀابتثجحخدذرزسشصضطظعغفقلمنهويِپچژکگیآأؤإئًَُّْ'ٔ٪۰۱۲۳۴۵۶۷۸۹ةabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZكأي",
        punctuations='!(),-.–:;«»?— ̠،؛؟<>"',
        phonemes=None,
        pad="<PAD>",
        eos="<EOS>",
        bos="<BOS>",
        blank="<BLNK>",
        characters_class=None,
    )

    config = VitsConfig(
        audio=audio_config,
        run_name="vits_farsi",
        batch_size=20,
        eval_batch_size=10,
        batch_group_size=5,
        num_loader_workers=4,
        num_eval_loader_workers=2,
        run_eval=True,
        test_delay_epochs=-1,  # run test synthesis from the first epoch
        epochs=1000,
        text_cleaner="basic_cleaners",
        # Phoneme settings are inert while use_phonemes is False; kept so the
        # script can be switched to phoneme-based training by flipping one flag.
        use_phonemes=False,
        phoneme_language="fa",
        phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
        compute_input_seq_cache=True,
        print_step=25,
        print_eval=True,
        mixed_precision=True,  # Keep mixed precision enabled
        output_path=output_path,
        datasets=[dataset_config],
        cudnn_benchmark=False,
        characters=character_config,
        # Farsi sentences synthesized at each evaluation to monitor quality.
        test_sentences=[
            ["سلطان محمود در زمستانی سخت به طلخک گفت که: با این جامه ی یک لا در این سرما چه می کنی "],
            ["مردی نزد بقالی آمد و گفت پیاز هم ده تا دهان بدان خو شبوی سازم."],
            ["از مال خود پاره ای گوشت بستان و زیره بایی معطّر بساز"],
            ["یک بار هم از جهنم بگویید."],
            ["یکی اسبی به عاریت خواست"],
        ],
    )

    # INITIALIZE THE AUDIO PROCESSOR
    ap = AudioProcessor.init_from_config(config)

    # INITIALIZE THE TOKENIZER — may rewrite parts of `config`
    # (e.g. the character set), hence the re-assignment.
    tokenizer, config = TTSTokenizer.init_from_config(config)

    # LOAD DATA SAMPLES
    train_samples, eval_samples = load_tts_samples(
        dataset_config,
        eval_split=True,
        eval_split_max_size=config.eval_split_max_size,
        eval_split_size=config.eval_split_size,
    )

    # INIT MODEL — single speaker, so no speaker manager is needed.
    model = Vits(config, ap, tokenizer, speaker_manager=None)

    # Resume training from a previous run's best checkpoint. Fail fast with a
    # clear message instead of a deep Trainer traceback if it is missing.
    checkpoint_path = os.path.join(
        "/home/fatima-w/workpj/coqui/vits_farsi-April-10-2025_10+51PM-0000000",
        "best_model.pth",
    )
    if not os.path.isfile(checkpoint_path):
        raise FileNotFoundError(f"Restore checkpoint not found: {checkpoint_path}")

    # INIT THE TRAINER AND START TRAINING
    trainer = Trainer(
        TrainerArgs(restore_path=checkpoint_path),
        config,
        output_path,
        model=model,
        train_samples=train_samples,
        eval_samples=eval_samples,
    )
    trainer.fit()
|