import os
import sys

sys.path.append(os.getcwd())

import json
from concurrent.futures import ProcessPoolExecutor
from importlib.resources import files
from pathlib import Path

import soundfile as sf
from datasets.arrow_writer import ArrowWriter
from tqdm import tqdm


def deal_with_audio_dir(audio_dir):
    sub_result, durations = [], []
    vocab_set = set()
    audio_lists = list(audio_dir.rglob("*.wav"))

    for line in audio_lists:
        text_path = line.with_suffix(".normalized.txt")
        with open(text_path, "r", encoding="utf-8") as f:  # context manager so the handle is closed
            text = f.read().strip()
        duration = sf.info(line).duration
        if duration < 0.4 or duration > 30:  # skip clips shorter than 0.4 s or longer than 30 s
            continue
        sub_result.append({"audio_path": str(line), "text": text, "duration": duration})
        durations.append(duration)
        vocab_set.update(list(text))
    return sub_result, durations, vocab_set
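

# Hedged sketch, not part of the original pipeline: deal_with_audio_dir assumes
# the LibriTTS layout where each <utt>.wav ships with a sibling
# <utt>.normalized.txt transcript. This helper (the name check_audio_dir is our
# own, not from the original script) pre-flights one directory before
# committing to a full multi-hour run:
def check_audio_dir(audio_dir):
    missing = [wav for wav in Path(audio_dir).rglob("*.wav") if not wav.with_suffix(".normalized.txt").exists()]
    if missing:
        print(f"{len(missing)} wav files lack a .normalized.txt sibling, e.g. {missing[0]}")
    return len(missing) == 0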


def main():
    result = []
    duration_list = []
    text_vocab_set = set()

    # process raw data: submit one worker task per speaker directory
    executor = ProcessPoolExecutor(max_workers=max_workers)
    futures = []

    for subset in tqdm(SUB_SET):
        dataset_path = Path(os.path.join(dataset_dir, subset))
        for audio_dir in dataset_path.iterdir():  # plain loop instead of a side-effect-only list comprehension
            if audio_dir.is_dir():
                futures.append(executor.submit(deal_with_audio_dir, audio_dir))
    for future in tqdm(futures, total=len(futures)):
        sub_result, durations, vocab_set = future.result()
        result.extend(sub_result)
        duration_list.extend(durations)
        text_vocab_set.update(vocab_set)
    executor.shutdown()
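
    # Note: wrapping the pool in `with ProcessPoolExecutor(...) as executor:`
    # is an equivalent pattern that also guarantees shutdown if a task raises.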

    # save preprocessed dataset to disk
    os.makedirs(save_dir, exist_ok=True)
    print(f"\nSaving to {save_dir} ...")

    with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer:
        for line in tqdm(result, desc="Writing to raw.arrow ..."):
            writer.write(line)
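
    # ArrowWriter (from the `datasets` package) buffers rows and flushes them
    # to disk in batches, so writing does not build a second full copy of
    # `result` in memory.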

    # dump durations to a separate JSON as well (handy for duration-aware batch samplers)
    with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f:
        json.dump({"duration": duration_list}, f, ensure_ascii=False)

    # vocab map, i.e. the char tokenizer: one symbol per line, sorted for a deterministic order
    with open(f"{save_dir}/vocab.txt", "w", encoding="utf-8") as f:
        for vocab in sorted(text_vocab_set):
            f.write(vocab + "\n")
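
    # A downstream char tokenizer can rebuild the symbol->id map by enumerating
    # the lines of vocab.txt; the sorted() above keeps those ids stable across runs.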

    print(f"\nFor {dataset_name}, sample count: {len(result)}")
    print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}")
    print(f"For {dataset_name}, total {sum(duration_list) / 3600:.2f} hours")


if __name__ == "__main__":
    max_workers = 36

    tokenizer = "char"  # character-level vocab: one entry per unique character seen in the transcripts

    SUB_SET = ["train-clean-100", "train-clean-360", "train-other-500"]
    dataset_dir = "<SOME_PATH>/LibriTTS"
    # e.g. resolves to "LibriTTS_100_360_500_char" for the subsets above
    dataset_name = f"LibriTTS_{'_'.join(SUB_SET)}_{tokenizer}".replace("train-clean-", "").replace("train-other-", "")
    save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}"
    print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n")
    main()