|
|
import os |
|
|
import re |
|
|
import json |
|
|
import shutil |
|
|
import argparse |
|
|
import librosa |
|
|
import soundfile as sf |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
def parse_args():
    """Build and parse the command-line interface for dataset reproduction.

    Returns:
        argparse.Namespace with attributes ``secomicsc_root``, ``dev_root``,
        ``cs_dialogue_root`` and ``output_dir``.
    """
    parser = argparse.ArgumentParser(description="Reproduce mixed Code-Switching Dataset.")

    # The three corpus roots are mandatory; only the output location has a default.
    parser.add_argument(
        "--secomicsc_root",
        type=str,
        required=True,
        help="Path to 'ASR-SECoMiCSC' folder (must contain TXT and WAV subfolders).",
    )
    parser.add_argument(
        "--dev_root",
        type=str,
        required=True,
        help="Path to 'ASR-DevCECoMiCSC' folder (must contain TXT and WAV subfolders).",
    )
    parser.add_argument(
        "--cs_dialogue_root",
        type=str,
        required=True,
        help="Path to CS-Dialogue 'short_wav' folder (must contain SCRIPT and WAVE).",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="./CS_chunks_Dataset",
        help="Directory to save processed audio and metadata.",
    )

    return parser.parse_args()
|
|
|
|
|
|
|
|
# Processing parameters shared by both legacy-corpus chunkers.
TARGET_SR = 16000  # sample rate (Hz) audio is resampled to on load


MIN_DURATION = 5.0  # NOTE(review): declared but never used — flush() hard-codes a 0.5 s lower bound; confirm which is intended


MAX_DURATION = 15.0  # chunks longer than this (seconds) are dropped / force a flush


MAX_GAP = 1.8  # max silence (seconds) between segments merged into one chunk


NOISE_TAGS = ["[ENS]", "[NPS]", "[SONANT]", "[*]", "[LAUGHTER]"]  # transcript markers treated as non-speech noise
|
|
|
|
|
|
|
|
def parse_legacy_line(line):
    """Parse one timestamped transcript line of the legacy format.

    Expected shape: ``[start,end] TOKEN TOKEN text...``. Returns a dict with
    ``start``/``end`` (float seconds), the transcript ``text`` and an
    ``is_noise`` flag, or None for blank/malformed lines.
    """
    stripped = line.strip()
    if not stripped:
        return None

    match = re.match(r"\[([\d.]+),([\d.]+)\]\s+(.*)", stripped)
    if match is None:
        return None

    start_s = float(match.group(1))
    end_s = float(match.group(2))
    tokens = match.group(3).split()
    if len(tokens) < 2:
        return None

    # With 3+ tokens the first two are skipped (presumably speaker/channel
    # metadata) and the remainder is the transcript; with exactly 2 tokens
    # only the last token is kept as text.
    if len(tokens) >= 3:
        text = " ".join(tokens[2:])
    else:
        text = tokens[-1]

    noisy = any(tag in text for tag in NOISE_TAGS)
    return {"start": start_s, "end": end_s, "text": text, "is_noise": noisy}
|
|
|
|
|
def process_legacy(dataset_name, specific_root_path, meta_f, audio_out_root):
    """Chunk one legacy corpus (TXT timestamps + WAV audio) into 16 kHz clips.

    Consecutive transcript segments are merged into a single chunk until the
    inter-segment gap exceeds MAX_GAP or the chunk would exceed MAX_DURATION;
    every flushed chunk is written under ``audio_out_root/<dataset_name>`` and
    one JSON line is appended to ``meta_f``.

    Args:
        dataset_name: Label used for the output subfolder and metadata 'source'.
        specific_root_path: Corpus root containing 'TXT' and 'WAV' subfolders.
        meta_f: Open text file handle receiving JSONL metadata lines.
        audio_out_root: Root directory for processed audio.
    """
    print(f"Processing Legacy: {dataset_name}...")

    txt_dir = os.path.join(specific_root_path, "TXT")
    wav_dir = os.path.join(specific_root_path, "WAV")

    sub_dir = os.path.join(audio_out_root, dataset_name)
    os.makedirs(sub_dir, exist_ok=True)

    if not os.path.exists(txt_dir):
        print(f"Skipping {dataset_name}: 'TXT' folder not found inside {specific_root_path}")
        return

    files = [f for f in os.listdir(txt_dir) if f.endswith(".txt")]

    for txt_file in tqdm(files, desc=dataset_name):
        wav_file = txt_file.replace(".txt", ".wav")
        wav_path = os.path.join(wav_dir, wav_file)
        txt_path = os.path.join(txt_dir, txt_file)

        if not os.path.exists(wav_path):
            continue

        try:
            audio, sr = librosa.load(wav_path, sr=TARGET_SR, mono=True)
        except Exception:
            # Fix: was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
            # Unreadable/corrupt audio skips this file, not the whole run.
            continue

        with open(txt_path, encoding="utf-8") as f:
            # Fix: original parsed every line TWICE (once for the value, once
            # for the filter); parse once and keep the truthy results.
            segments = [seg for seg in (parse_legacy_line(l) for l in f) if seg]
        segments.sort(key=lambda x: x["start"])

        buffer = []
        buffer_start = None
        last_end = None

        def flush():
            # Emit the buffered segments as one audio chunk + metadata line.
            nonlocal buffer, buffer_start
            if not buffer:
                return

            start_t = buffer_start
            end_t = buffer[-1]["end"]

            # Drop chunks whose timestamps fall outside the decoded audio.
            if int(start_t * sr) >= len(audio) or int(end_t * sr) > len(audio):
                return
            chunk = audio[int(start_t * sr): int(end_t * sr)]
            dur = len(chunk) / sr

            # NOTE(review): lower bound is a hard-coded 0.5 s while the module
            # defines MIN_DURATION = 5.0 that is never used — confirm intent.
            if dur < 0.5 or dur > MAX_DURATION:
                return
            texts = [s["text"] for s in buffer if not s["is_noise"]]
            if not texts:
                return

            fname = f"{dataset_name}_{os.path.basename(wav_path)[:-4]}_{int(start_t*100)}_{int(end_t*100)}.wav"
            out_path = os.path.join(sub_dir, fname)
            sf.write(out_path, chunk, sr)

            meta_f.write(json.dumps({
                "file_name": f"audio/{dataset_name}/{fname}",
                "sentence": " ".join(texts),
                "duration": round(dur, 2),
                "source": dataset_name
            }, ensure_ascii=False) + "\n")

        for seg in segments:
            if not buffer:
                # Never start a chunk on a noise-only segment.
                if seg["is_noise"]:
                    continue
                buffer, buffer_start = [seg], seg["start"]
                last_end = seg["end"]
                continue

            gap = seg["start"] - last_end
            est_dur = seg["end"] - buffer_start

            if gap > MAX_GAP or est_dur > MAX_DURATION:
                # Gap too long or chunk too big: emit what we have, then start
                # fresh (empty if the current segment is noise).
                flush()
                buffer = [] if seg["is_noise"] else [seg]
                buffer_start = seg["start"] if buffer else None
            else:
                buffer.append(seg)
            last_end = seg["end"]
        flush()
|
|
|
|
|
|
|
|
def process_cs_dialogue(source_root, meta_f, audio_out_root):
    """Copy <MIX>-tagged CS-Dialogue utterances and record their metadata.

    Reads every ``SCRIPT/*.txt`` transcript; for lines tagged ``<MIX>`` the
    matching wav is copied from ``WAVE/C0/<session_id>/`` into
    ``audio_out_root/CS_Dialogue`` and one JSON line is appended to ``meta_f``.

    Args:
        source_root: CS-Dialogue 'short_wav' root containing SCRIPT and WAVE.
        meta_f: Open text file handle receiving JSONL metadata lines.
        audio_out_root: Root directory for processed audio.
    """
    DATASET_NAME = "CS_Dialogue"

    script_dir = os.path.join(source_root, "SCRIPT")
    wave_root = os.path.join(source_root, "WAVE", "C0")
    sub_dir = os.path.join(audio_out_root, DATASET_NAME)
    os.makedirs(sub_dir, exist_ok=True)

    if not os.path.exists(script_dir):
        print(f"CS-Dialogue SCRIPT dir not found: {script_dir}")
        return

    txt_files = [f for f in os.listdir(script_dir) if f.endswith(".txt")]

    for txt_file in tqdm(txt_files, desc=DATASET_NAME):
        txt_path = os.path.join(script_dir, txt_file)
        session_id = os.path.splitext(txt_file)[0]
        src_audio_folder = os.path.join(wave_root, session_id)

        if not os.path.exists(src_audio_folder):
            continue

        with open(txt_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue

                # Expected line shape: "<utt_id> <tag> <transcript...>".
                parts = line.split(maxsplit=2)
                if len(parts) < 3:
                    continue

                fname_raw, tag, text = parts[0], parts[1], parts[2]

                # Keep only code-switched (mixed-language) utterances.
                if tag != "<MIX>":
                    continue

                if not fname_raw.endswith(".wav"):
                    fname_raw += ".wav"
                src_wav = os.path.join(src_audio_folder, fname_raw)

                if os.path.exists(src_wav):
                    dst_wav = os.path.join(sub_dir, fname_raw)
                    shutil.copy2(src_wav, dst_wav)

                    try:
                        dur = librosa.get_duration(path=dst_wav)
                    except Exception:
                        # Fix: was a bare `except:`. Duration is best-effort
                        # metadata, so fall back to 0.0 on failure.
                        dur = 0.0

                    meta_f.write(json.dumps({
                        "file_name": f"audio/{DATASET_NAME}/{fname_raw}",
                        "sentence": text,
                        "duration": round(dur, 2),
                        "source": DATASET_NAME,
                        "original_tag": tag
                    }, ensure_ascii=False) + "\n")
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Script entry point: build the output layout, then run every source
    # corpus through its processor while a single metadata file is open.
    cli_args = parse_args()

    audio_root = os.path.join(cli_args.output_dir, "audio")
    metadata_path = os.path.join(cli_args.output_dir, "metadata.jsonl")
    os.makedirs(audio_root, exist_ok=True)

    with open(metadata_path, 'w', encoding='utf-8') as meta_handle:
        process_legacy("SECoMiCSC", cli_args.secomicsc_root, meta_handle, audio_root)
        process_legacy("DevCECoMiCSC", cli_args.dev_root, meta_handle, audio_root)
        process_cs_dialogue(cli_args.cs_dialogue_root, meta_handle, audio_root)

    print(f"\nAll Done! Dataset ready at: {cli_args.output_dir}")