1uckyan committed on
Commit
1ba8d1f
·
verified ·
1 Parent(s): 6e23c5e

Upload data_preperation.py

Browse files
Files changed (1) hide show
  1. data_preperation.py +199 -0
data_preperation.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import json
4
+ import shutil
5
+ import argparse
6
+ import librosa
7
+ import soundfile as sf
8
+ from tqdm import tqdm
9
+
10
# ARGS CONFIGURATION
def parse_args():
    """Build and parse the command-line options for dataset reproduction.

    Returns an argparse.Namespace with secomicsc_root, dev_root,
    cs_dialogue_root and output_dir attributes.
    """
    ap = argparse.ArgumentParser(description="Reproduce mixed Code-Switching Dataset.")

    ap.add_argument(
        "--secomicsc_root",
        type=str,
        required=True,
        help="Path to 'ASR-SECoMiCSC' folder (must contain TXT and WAV subfolders).",
    )
    ap.add_argument(
        "--dev_root",
        type=str,
        required=True,
        help="Path to 'ASR-DevCECoMiCSC' folder (must contain TXT and WAV subfolders).",
    )
    ap.add_argument(
        "--cs_dialogue_root",
        type=str,
        required=True,
        help="Path to CS-Dialogue 'short_wav' folder (must contain SCRIPT and WAVE).",
    )
    ap.add_argument(
        "--output_dir",
        type=str,
        default="./CS_chunks_Dataset",
        help="Directory to save processed audio and metadata.",
    )

    return ap.parse_args()
27
+
28
# CONSTANTS
TARGET_SR = 16000     # resample target, Hz
MIN_DURATION = 5.0    # NOTE(review): not referenced by the visible chunking code (it hardcodes 0.5 s)
MAX_DURATION = 15.0   # longest chunk to emit, seconds
MAX_GAP = 1.8         # largest silence allowed between merged segments, seconds
NOISE_TAGS = ["[ENS]", "[NPS]", "[SONANT]", "[*]", "[LAUGHTER]"]

# LEGACY PROCESSING LOGIC
def parse_legacy_line(line):
    """Parse one '[start,end] ...' transcript line from a legacy TXT file.

    Returns a dict {"start", "end", "text", "is_noise"} with times as float
    seconds, or None when the line is blank or does not match the format.
    """
    stripped = line.strip()
    if not stripped:
        return None

    match = re.match(r"\[([\d.]+),([\d.]+)\]\s+(.*)", stripped)
    if match is None:
        return None

    start = float(match.group(1))
    end = float(match.group(2))
    tokens = match.group(3).split()
    if len(tokens) < 2:
        return None

    # The first two tokens appear to be per-segment metadata fields; the
    # transcript is everything after them (TODO confirm against the corpus).
    if len(tokens) >= 3:
        text = " ".join(tokens[2:])
    else:
        text = tokens[-1]

    noisy = any(tag in text for tag in NOISE_TAGS)
    return {"start": start, "end": end, "text": text, "is_noise": noisy}
47
+
48
def process_legacy(dataset_name, specific_root_path, meta_f, audio_out_root):
    """Chunk one legacy corpus (TXT timing files + WAV audio) into clips.

    Consecutive transcript segments are merged while the inter-segment gap
    stays <= MAX_GAP and the running duration stays <= MAX_DURATION, then
    written out as a single chunk plus one JSONL metadata row.

    Args:
        dataset_name: label used for the output subfolder and metadata "source".
        specific_root_path: corpus root that must contain TXT/ and WAV/ subfolders.
        meta_f: open text handle; one JSON line is appended per saved chunk.
        audio_out_root: root directory that receives audio/<dataset_name>/ files.
    """
    print(f"Processing Legacy: {dataset_name}...")

    txt_dir = os.path.join(specific_root_path, "TXT")
    wav_dir = os.path.join(specific_root_path, "WAV")

    # audio/SECoMiCSC
    sub_dir = os.path.join(audio_out_root, dataset_name)
    os.makedirs(sub_dir, exist_ok=True)

    if not os.path.exists(txt_dir):
        print(f"Skipping {dataset_name}: 'TXT' folder not found inside {specific_root_path}")
        return

    files = [f for f in os.listdir(txt_dir) if f.endswith(".txt")]

    for txt_file in tqdm(files, desc=dataset_name):
        wav_file = txt_file.replace(".txt", ".wav")
        wav_path = os.path.join(wav_dir, wav_file)
        txt_path = os.path.join(txt_dir, txt_file)

        if not os.path.exists(wav_path):
            continue

        try:
            audio, sr = librosa.load(wav_path, sr=TARGET_SR, mono=True)
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C / SystemExit still work.
            continue

        with open(txt_path, encoding="utf-8") as f:
            # Parse each line exactly once (original called parse_legacy_line
            # twice per line: once in the condition, once for the value).
            parsed = (parse_legacy_line(line) for line in f)
            segments = [seg for seg in parsed if seg]
        segments.sort(key=lambda x: x["start"])

        buffer = []
        buffer_start = None
        last_end = None

        def flush():
            """Write the buffered segments as one audio chunk + metadata row."""
            nonlocal buffer, buffer_start
            if not buffer:
                return

            start_t = buffer_start
            end_t = buffer[-1]["end"]

            # Guard against timestamps that run past the decoded audio.
            if int(start_t * sr) >= len(audio) or int(end_t * sr) > len(audio):
                return
            chunk = audio[int(start_t * sr): int(end_t * sr)]
            dur = len(chunk) / sr

            # NOTE(review): lower bound is a hard-coded 0.5 s, not MIN_DURATION.
            if dur < 0.5 or dur > MAX_DURATION:
                return
            texts = [s["text"] for s in buffer if not s["is_noise"]]
            if not texts:
                return

            # Save Chunk
            fname = f"{dataset_name}_{os.path.basename(wav_path)[:-4]}_{int(start_t*100)}_{int(end_t*100)}.wav"
            out_path = os.path.join(sub_dir, fname)
            sf.write(out_path, chunk, sr)

            # Write Metadata
            meta_f.write(json.dumps({
                "file_name": f"audio/{dataset_name}/{fname}",
                "sentence": " ".join(texts),
                "duration": round(dur, 2),
                "source": dataset_name
            }, ensure_ascii=False) + "\n")

        for seg in segments:
            if not buffer:
                # Never start a chunk on a noise-only segment.
                if seg["is_noise"]:
                    continue
                buffer, buffer_start = [seg], seg["start"]
                last_end = seg["end"]
                continue

            gap = seg["start"] - last_end
            est_dur = seg["end"] - buffer_start

            if gap > MAX_GAP or est_dur > MAX_DURATION:
                flush()
                buffer = [] if seg["is_noise"] else [seg]
                buffer_start = seg["start"] if buffer else None
            else:
                buffer.append(seg)
            last_end = seg["end"]
        flush()
130
+
131
# CS-DIALOGUE PROCESSING LOGIC
def process_cs_dialogue(source_root, meta_f, audio_out_root):
    """Copy <MIX>-tagged CS-Dialogue utterances and append their metadata.

    Args:
        source_root: CS-Dialogue 'short_wav' root containing SCRIPT/ and WAVE/C0/.
        meta_f: open text handle; one JSON line is appended per copied wav.
        audio_out_root: root directory that receives audio/CS_Dialogue/ files.
    """
    DATASET_NAME = "CS_Dialogue"

    script_dir = os.path.join(source_root, "SCRIPT")
    wave_root = os.path.join(source_root, "WAVE", "C0")
    sub_dir = os.path.join(audio_out_root, DATASET_NAME)
    os.makedirs(sub_dir, exist_ok=True)

    if not os.path.exists(script_dir):
        print(f"CS-Dialogue SCRIPT dir not found: {script_dir}")
        return

    txt_files = [f for f in os.listdir(script_dir) if f.endswith(".txt")]

    for txt_file in tqdm(txt_files, desc=DATASET_NAME):
        txt_path = os.path.join(script_dir, txt_file)
        session_id = os.path.splitext(txt_file)[0]
        src_audio_folder = os.path.join(wave_root, session_id)

        if not os.path.exists(src_audio_folder):
            continue

        with open(txt_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue

                # Script rows look like: "<utt_id> <TAG> <transcript...>".
                parts = line.split(maxsplit=2)
                if len(parts) < 3:
                    continue

                fname_raw, tag, text = parts[0], parts[1], parts[2]

                # Keep only code-switched (mixed-language) utterances.
                if tag != "<MIX>":
                    continue

                if not fname_raw.endswith(".wav"):
                    fname_raw += ".wav"
                src_wav = os.path.join(src_audio_folder, fname_raw)

                if os.path.exists(src_wav):
                    dst_wav = os.path.join(sub_dir, fname_raw)
                    shutil.copy2(src_wav, dst_wav)

                    try:
                        dur = librosa.get_duration(path=dst_wav)
                    except Exception:
                        # Was a bare `except:`; narrowed so Ctrl-C still works.
                        # Best-effort: unreadable audio is recorded with dur 0.
                        dur = 0.0

                    meta_f.write(json.dumps({
                        "file_name": f"audio/{DATASET_NAME}/{fname_raw}",
                        "sentence": text,
                        "duration": round(dur, 2),
                        "source": DATASET_NAME,
                        "original_tag": tag
                    }, ensure_ascii=False) + "\n")
184
+
185
# MAIN ENTRY
if __name__ == "__main__":
    cli_args = parse_args()

    audio_root = os.path.join(cli_args.output_dir, "audio")
    metadata_path = os.path.join(cli_args.output_dir, "metadata.jsonl")

    # Creates output_dir as a parent as well.
    os.makedirs(audio_root, exist_ok=True)

    # All three processors append JSONL rows to the same metadata handle.
    with open(metadata_path, 'w', encoding='utf-8') as meta_handle:
        process_legacy("SECoMiCSC", cli_args.secomicsc_root, meta_handle, audio_root)
        process_legacy("DevCECoMiCSC", cli_args.dev_root, meta_handle, audio_root)
        process_cs_dialogue(cli_args.cs_dialogue_root, meta_handle, audio_root)

    print(f"\nAll Done! Dataset ready at: {cli_args.output_dir}")