ashishkblink committed on
Commit 6b69eec · verified · 1 Parent(s): ac92a73

Upload f5_tts/train/datasets/prepare_emilia.py with huggingface_hub

f5_tts/train/datasets/prepare_emilia.py ADDED
@@ -0,0 +1,230 @@
+ # Emilia Dataset: https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07
+ # if using the updated release (i.e. WebDataset format), feel free to modify / draft your own script
+
+ # generate the audio-text map for Emilia ZH & EN
+ # and evaluate the vocab size
+
+ import os
+ import sys
+
+ sys.path.append(os.getcwd())
+
+ import json
+ from concurrent.futures import ProcessPoolExecutor
+ from importlib.resources import files
+ from pathlib import Path
+ from tqdm import tqdm
+
+ from datasets.arrow_writer import ArrowWriter
+
+ from f5_tts.model.utils import (
+     repetition_found,
+     convert_char_to_pinyin,
+ )
+
+
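+ # utterance IDs and character filters used to flag bad transcriptions; matching samples are skipped as bad cases below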
+ out_zh = {
+     "ZH_B00041_S06226",
+     "ZH_B00042_S09204",
+     "ZH_B00065_S09430",
+     "ZH_B00065_S09431",
+     "ZH_B00066_S09327",
+     "ZH_B00066_S09328",
+ }
+ zh_filters = ["い", "て"]
+ # these seem to be synthesized audios, or heavily code-switched
+ out_en = {
+     "EN_B00013_S00913",
+     "EN_B00042_S00120",
+     "EN_B00055_S04111",
+     "EN_B00061_S00693",
+     "EN_B00061_S01494",
+     "EN_B00061_S03375",
+     "EN_B00059_S00092",
+     "EN_B00111_S04300",
+     "EN_B00100_S03759",
+     "EN_B00087_S03811",
+     "EN_B00059_S00950",
+     "EN_B00089_S00946",
+     "EN_B00078_S05127",
+     "EN_B00070_S04089",
+     "EN_B00074_S09659",
+     "EN_B00061_S06983",
+     "EN_B00061_S07060",
+     "EN_B00059_S08397",
+     "EN_B00082_S06192",
+     "EN_B00091_S01238",
+     "EN_B00089_S07349",
+     "EN_B00070_S04343",
+     "EN_B00061_S02400",
+     "EN_B00076_S01262",
+     "EN_B00068_S06467",
+     "EN_B00076_S02943",
+     "EN_B00064_S05954",
+     "EN_B00061_S05386",
+     "EN_B00066_S06544",
+     "EN_B00076_S06944",
+     "EN_B00072_S08620",
+     "EN_B00076_S07135",
+     "EN_B00076_S09127",
+     "EN_B00065_S00497",
+     "EN_B00059_S06227",
+     "EN_B00063_S02859",
+     "EN_B00075_S01547",
+     "EN_B00061_S08286",
+     "EN_B00079_S02901",
+     "EN_B00092_S03643",
+     "EN_B00096_S08653",
+     "EN_B00063_S04297",
+     "EN_B00063_S04614",
+     "EN_B00079_S04698",
+     "EN_B00104_S01666",
+     "EN_B00061_S09504",
+     "EN_B00061_S09694",
+     "EN_B00065_S05444",
+     "EN_B00063_S06860",
+     "EN_B00065_S05725",
+     "EN_B00069_S07628",
+     "EN_B00083_S03875",
+     "EN_B00071_S07665",
+     "EN_B00071_S07665",
+     "EN_B00062_S04187",
+     "EN_B00065_S09873",
+     "EN_B00065_S09922",
+     "EN_B00084_S02463",
+     "EN_B00067_S05066",
+     "EN_B00106_S08060",
+     "EN_B00073_S06399",
+     "EN_B00073_S09236",
+     "EN_B00087_S00432",
+     "EN_B00085_S05618",
+     "EN_B00064_S01262",
+     "EN_B00072_S01739",
+     "EN_B00059_S03913",
+     "EN_B00069_S04036",
+     "EN_B00067_S05623",
+     "EN_B00060_S05389",
+     "EN_B00060_S07290",
+     "EN_B00062_S08995",
+ }
+ en_filters = ["ا", "い", "て"]
+
+
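+ # process one audio subdirectory: parse its matching .jsonl metadata, skip bad cases, and collect audio path, text, duration, and the character vocab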
+ def deal_with_audio_dir(audio_dir):
+     audio_jsonl = audio_dir.with_suffix(".jsonl")
+     sub_result, durations = [], []
+     vocab_set = set()
+     bad_case_zh = 0
+     bad_case_en = 0
+     with open(audio_jsonl, "r") as f:
+         lines = f.readlines()
+         for line in tqdm(lines, desc=f"{audio_jsonl.stem}"):
+             obj = json.loads(line)
+             text = obj["text"]
+             if obj["language"] == "zh":
+                 if obj["wav"].split("/")[1] in out_zh or any(f in text for f in zh_filters) or repetition_found(text):
+                     bad_case_zh += 1
+                     continue
+                 else:
+                     text = text.translate(
+                         str.maketrans({"，": ",", "！": "!", "？": "?"})
+                     )  # "。" is left unchanged since much of the data is code-switched
+             if obj["language"] == "en":
+                 if (
+                     obj["wav"].split("/")[1] in out_en
+                     or any(f in text for f in en_filters)
+                     or repetition_found(text, length=4)
+                 ):
+                     bad_case_en += 1
+                     continue
+             if tokenizer == "pinyin":
+                 text = convert_char_to_pinyin([text], polyphone=polyphone)[0]
+             duration = obj["duration"]
+             sub_result.append({"audio_path": str(audio_dir.parent / obj["wav"]), "text": text, "duration": duration})
+             durations.append(duration)
+             vocab_set.update(list(text))
+     return sub_result, durations, vocab_set, bad_case_zh, bad_case_en
+
+
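+ # fan per-subdirectory processing out across worker processes, then merge the results and save the dataset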
+ def main():
+     assert tokenizer in ["pinyin", "char"]
+     result = []
+     duration_list = []
+     text_vocab_set = set()
+     total_bad_case_zh = 0
+     total_bad_case_en = 0
+
+     # process raw data
+     executor = ProcessPoolExecutor(max_workers=max_workers)
+     futures = []
+     for lang in langs:
+         dataset_path = Path(os.path.join(dataset_dir, lang))
+         for audio_dir in dataset_path.iterdir():
+             if audio_dir.is_dir():
+                 futures.append(executor.submit(deal_with_audio_dir, audio_dir))
+     for future in tqdm(futures, total=len(futures)):
+         sub_result, durations, vocab_set, bad_case_zh, bad_case_en = future.result()
+         result.extend(sub_result)
+         duration_list.extend(durations)
+         text_vocab_set.update(vocab_set)
+         total_bad_case_zh += bad_case_zh
+         total_bad_case_en += bad_case_en
+     executor.shutdown()
+
+     # save preprocessed dataset to disk
+     if not os.path.exists(save_dir):
+         os.makedirs(save_dir)
+     print(f"\nSaving to {save_dir} ...")
+
+     # dataset = Dataset.from_dict({"audio_path": audio_path_list, "text": text_list, "duration": duration_list})  # oom
+     # dataset.save_to_disk(f"{save_dir}/raw", max_shard_size="2GB")
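+     # stream rows with ArrowWriter instead of building one in-memory Dataset, which runs out of memory at this scale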
+     with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer:
+         for line in tqdm(result, desc="Writing to raw.arrow ..."):
+             writer.write(line)
+
+     # also dump the durations to a separate JSON, for convenient use with DynamicBatchSampler
+     with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f:
+         json.dump({"duration": duration_list}, f, ensure_ascii=False)
+
+     # vocab map, i.e. tokenizer
+     # add alphabets and symbols (optional, if planning to fine-tune on de/fr etc.)
+     # if tokenizer == "pinyin":
+     #     text_vocab_set.update([chr(i) for i in range(32, 127)] + [chr(i) for i in range(192, 256)])
+     with open(f"{save_dir}/vocab.txt", "w") as f:
+         for vocab in sorted(text_vocab_set):
+             f.write(vocab + "\n")
+
+     print(f"\nFor {dataset_name}, sample count: {len(result)}")
+     print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}")
+     print(f"For {dataset_name}, total {sum(duration_list)/3600:.2f} hours")
+     if "ZH" in langs:
+         print(f"Bad zh transcription case: {total_bad_case_zh}")
+     if "EN" in langs:
+         print(f"Bad en transcription case: {total_bad_case_en}\n")
+
+
+ if __name__ == "__main__":
+     max_workers = 32
+
+     tokenizer = "pinyin"  # "pinyin" | "char"
+     polyphone = True
+
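+     # NOTE: <SOME_PATH> is a placeholder; point dataset_dir at your local copy of the Emilia raw data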
+     langs = ["ZH", "EN"]
+     dataset_dir = "<SOME_PATH>/Emilia_Dataset/raw"
+     dataset_name = f"Emilia_{'_'.join(langs)}_{tokenizer}"
+     save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}"
+     print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n")
+
+     main()
+
+ # Emilia ZH & EN
+ # samples count 37837916 (after removal)
+ # pinyin vocab size 2543 (polyphone)
+ # total duration 95281.87 (hours)
+ # bad zh asr cnt 230435 (samples)
+ # bad en asr cnt 37217 (samples)
+
+ # vocab size may differ slightly due to the jieba tokenizer and pypinyin (e.g. handling of polyphones)
+ # be careful if using a pretrained model: make sure the vocab.txt matches