File size: 3,230 Bytes
cb35c36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import os
import torch
import torchaudio
from tqdm import tqdm
from chatterbox.tts_turbo import ChatterboxTurboTTS

# Run on GPU when available; everything below (model + resampler) follows DEVICE.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
BASE_DIR = "/home/cloud/StyleTTS2-fine-tuning"
OUTPUT_DIR = os.path.join(BASE_DIR, "Data")

# Voice-cloning reference clip passed as the audio prompt for every generation.
REFERENCE_AUDIO_PATH = os.path.join(OUTPUT_DIR, "reference_wavs/british_accent_audio.wav")
# One sentence per line; each line becomes one synthesized wav.
INPUT_TEXT_FILE = os.path.join(OUTPUT_DIR, "source_text_final.txt")
# Append-only manifest ("filename|text|speaker") — also used to resume after a crash.
TRAIN_LIST_PATH = os.path.join(OUTPUT_DIR, "train_list_new.txt")
WAVS_DIR = os.path.join(OUTPUT_DIR, "wavs")
# StyleTTS2 training expects 24 kHz audio; model output is resampled if needed.
TARGET_SAMPLE_RATE = 24000

os.makedirs(WAVS_DIR, exist_ok=True)

# Loaded once at import time; downloads weights on first run.
model = ChatterboxTurboTTS.from_pretrained(device=DEVICE)

def get_sentences(text_path):
    """Return the non-empty lines of *text_path*, stripped of surrounding
    whitespace, in file order. Returns [] when the file does not exist."""
    if not os.path.exists(text_path):
        return []

    with open(text_path, 'r', encoding='utf-8') as source:
        # Strip each line; keep only those with actual content.
        return [stripped for stripped in (raw.strip() for raw in source) if stripped]

def get_completed_indices():
    """Return the set of 1-based sample indices already recorded in the train list.

    Each manifest line is '<filename>|<text>|<speaker>' where <filename> follows
    the 'file_NNNN.wav' pattern written by generate_dataset(); NNNN is parsed as
    the completed index. Malformed lines are skipped so a partially corrupt
    manifest never aborts the resume scan.
    """
    if not os.path.exists(TRAIN_LIST_PATH):
        return set()

    completed = set()
    with open(TRAIN_LIST_PATH, "r", encoding="utf-8") as f:
        for line in f:
            # str.split always yields at least one element, so [0] is safe.
            filename = line.strip().split("|")[0]
            try:
                # 'file_0001.wav' -> '0001' -> 1
                completed.add(int(filename.split("_")[1].split(".")[0]))
            except (IndexError, ValueError):
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit): skip unparseable entries only.
                continue
    return completed

def generate_dataset():
    """Synthesize one wav per input sentence and append manifest entries.

    Skips indices already present in the train list (resume support), generates
    audio with the voice-cloned reference prompt, normalizes it to mono
    (channels, samples), resamples to TARGET_SAMPLE_RATE when the model's
    native rate differs, saves the wav, and appends 'filename|text|0' to the
    manifest. Per-sample failures are logged and skipped.
    """
    sentences = get_sentences(INPUT_TEXT_FILE)
    completed_indices = get_completed_indices()

    print(f"Total sentences: {len(sentences)}")
    print(f"Already done:    {len(completed_indices)}")

    resampler = None
    if model.sr != TARGET_SAMPLE_RATE:
        # BUG FIX: the resampler previously lived on DEVICE but is applied
        # below to a tensor already moved to CPU — on CUDA that is a device
        # mismatch. Keep it on CPU to match where it is used.
        resampler = torchaudio.transforms.Resample(orig_freq=model.sr, new_freq=TARGET_SAMPLE_RATE)

    for i, sentence in enumerate(tqdm(sentences)):
        if (i + 1) in completed_indices:
            continue

        filename = f"file_{i+1:04d}.wav"
        filepath = os.path.join(WAVS_DIR, filename)
        print(f"Generating {filename}...")
        try:
            with torch.inference_mode():
                wav_tensor = model.generate(
                    sentence,
                    audio_prompt_path=REFERENCE_AUDIO_PATH
                )

            # Normalize to (channels, samples).
            if wav_tensor.dim() == 1:
                wav_tensor = wav_tensor.unsqueeze(0)

            # Downmix multi-channel output to mono.
            if wav_tensor.shape[0] > 1:
                wav_tensor = wav_tensor.mean(dim=0, keepdim=True)

            wav_tensor = wav_tensor.cpu()

            if resampler:
                wav_tensor = resampler(wav_tensor)

            torchaudio.save(filepath, wav_tensor, TARGET_SAMPLE_RATE)

            # BUG FIX: record the actual filename (a literal placeholder was
            # written before), so get_completed_indices() can parse the index
            # back out on resume.
            with open(TRAIN_LIST_PATH, "a", encoding="utf-8") as f:
                f.write(f"{filename}|{sentence}|0\n")

            del wav_tensor
            # BUG FIX: only touch the CUDA allocator when actually on CUDA;
            # torch.cuda.synchronize() raises on CPU-only machines.
            if DEVICE == "cuda":
                torch.cuda.empty_cache()
                if (i + 1) % 50 == 0:
                    torch.cuda.synchronize()

        except Exception as e:
            # Best-effort pipeline: log and move on so one bad sentence
            # doesn't kill an hours-long run.
            print(f"Error at sample {i+1}: {e}")
            continue


# Script entry point: run the full generation/resume loop.
if __name__ == "__main__":
    generate_dataset()