| import numpy as np |
| import soundfile as sf |
| import os |
| import glob |
| from scipy import signal |
| import random |
|
|
# DEMAND-style noise environments, grouped into their six big categories.
BIGCAT_TO_ENVS = {
    "Domestic": ["DWASHING", "DKITCHEN", "DLIVING"],
    "Nature": ["NFIELD", "NRIVER", "NPARK"],
    "Office": ["OOFFICE", "OHALLWAY", "OMEETING"],
    "Public": ["PSTATION", "PCAFETER", "PRESTO"],
    "Street": ["STRAFFIC", "SPSQUARE", "SCAFE"],
    "Transportation": ["TMETRO", "TBUS", "TCAR"],
}

# Reverse lookup: environment code -> its big category.
ENV_TO_BIGCAT = {
    env: category
    for category, env_list in BIGCAT_TO_ENVS.items()
    for env in env_list
}

# Within-Mismatch rotation: every environment maps to the next one in its
# category's list, cyclically (e.g. DWASHING -> DKITCHEN -> DLIVING -> DWASHING).
# Derived from BIGCAT_TO_ENVS; identical to the original hand-written table.
WITHIN_ROTATION = {
    env_list[i]: env_list[(i + 1) % len(env_list)]
    for env_list in BIGCAT_TO_ENVS.values()
    for i in range(len(env_list))
}

# Cross-Mismatch pairing: fixed mapping from a source big category to the
# (perceptually distant) big category to draw mismatched noise from.
CROSS_BIGCAT_MAP = {
    "Domestic": "Street",
    "Nature": "Transportation",
    "Office": "Nature",
    "Public": "Domestic",
    "Street": "Office",
    "Transportation": "Public",
}
|
|
|
|
def read_wav(file_path):
    """Load a WAV file via soundfile; returns (data, samplerate)."""
    data, samplerate = sf.read(file_path)
    return data, samplerate
|
|
def write_wav(file_path, data, samplerate):
    """Write `data` to `file_path` as a WAV file at `samplerate` Hz.

    Thin pass-through to soundfile; output subtype/format is whatever
    sf.write infers from the .wav extension and the data dtype.
    """
    sf.write(file_path, data, samplerate)
|
|
def calculate_rms(signal):
    """Return the root-mean-square amplitude of a sample array.

    NOTE(review): the parameter name shadows the module-level
    `scipy.signal` import inside this function; kept because the
    parameter name is part of the call interface.
    """
    mean_square = np.mean(np.square(signal))
    return np.sqrt(mean_square)
|
|
def add_noise(speech, noise, snr_db):
    """Mix `noise` into `speech` at the requested SNR (in dB).

    The noise is looped (tiled) when shorter than the speech, or randomly
    cropped when at least as long. The mixture is rescaled to a 0.95 peak
    only if it would clip (|sample| > 1).
    """
    n = len(speech)
    if len(noise) < n:
        # Loop the noise until it covers the speech, then trim to length.
        reps = int(np.ceil(n / len(noise)))
        noise = np.tile(noise, reps)[:n]
    else:
        # Take a random segment of the (longer or equal-length) noise.
        offset = random.randint(0, len(noise) - n)
        noise = noise[offset:offset + n]

    # RMS computed inline (same formula as the module's calculate_rms helper).
    rms_speech = np.sqrt(np.mean(speech ** 2))
    rms_noise = np.sqrt(np.mean(noise ** 2))

    # SNR over amplitudes: snr_db = 20 * log10(rms_speech / rms_noise).
    target_noise_rms = rms_speech / (10 ** (snr_db / 20))

    # Small epsilon guards against division by zero for silent noise.
    scaled_noise = noise * (target_noise_rms / (rms_noise + 1e-10))

    mixture = speech + scaled_noise

    # Prevent clipping: renormalize to a 0.95 peak only when out of range.
    peak = np.max(np.abs(mixture))
    if peak > 1:
        mixture = mixture / peak * 0.95

    return mixture
|
|
def parse_speech_stem(stem: str):
    """Split a speech filename stem into its components.

    Expected format:
      - {ENV}_{E|I}{ID}  e.g. DKITCHEN_E01
      - NEUTRAL_N{ID}    e.g. NEUTRAL_N03

    Returns:
        (env_code, type_char, rest) — env_code and type_char uppercased,
        rest kept verbatim.

    Raises:
        ValueError: when there is no '_' or nothing follows it.
    """
    env_code, sep, rest = stem.partition("_")
    if not sep:
        raise ValueError(f"Cannot parse speech filename (missing '_'): {stem}")
    if not rest:
        raise ValueError(f"Cannot parse speech filename (empty after '_'): {stem}")
    return env_code.upper(), rest[0].upper(), rest
|
|
def build_noise_index(noise_files):
    """Map each noise file's uppercased stem to its path.

    On duplicate stems the later path wins (same as sequential insertion).
    """
    return {
        os.path.splitext(os.path.basename(path))[0].upper(): path
        for path in noise_files
    }
|
|
def get_noise_path(noise_index, env_code: str):
    """Return the noise file path for `env_code` (case-insensitive), or None."""
    return noise_index.get(env_code.upper())
|
|
def process_files(speech_folder, noise_folder, output_folder, snr_levels=None, seed=None):
    """Mix every speech WAV with three noise conditions at each SNR level.

    For each speech file, three noise environments are chosen:
      - NEUTRAL speech: one environment from each of 3 random big categories;
      - environment-tagged speech: Matched (same env), Within-Mismatch
        (WITHIN_ROTATION), and Cross-Mismatch (random env from
        CROSS_BIGCAT_MAP's paired category).
    Each (speech, noise, snr) triple is written to `output_folder` as
    `{speech}_{NOISE}_{snr}.wav`.

    Args:
        speech_folder: directory holding input speech ``*.wav`` files.
        noise_folder: directory holding noise ``*.wav`` files (stem = env code).
        output_folder: destination directory; created if missing.
        snr_levels: SNRs in dB; defaults to [-10, -5, 0, 5, 10].
        seed: optional seed for ``random`` to make noise selection repeatable.
    """
    # Fix: the original signature used a mutable default list argument.
    if snr_levels is None:
        snr_levels = [-10, -5, 0, 5, 10]

    speech_files = sorted(glob.glob(os.path.join(speech_folder, "*.wav")))
    noise_files = sorted(glob.glob(os.path.join(noise_folder, "*.wav")))
    noise_index = build_noise_index(noise_files)

    if seed is not None:
        random.seed(seed)

    print(f"Found {len(speech_files)} speech files")
    print(f"Found {len(noise_files)} noise files")
    print(f"SNR levels (dB): {snr_levels}")
    print("=" * 60)

    os.makedirs(output_folder, exist_ok=True)

    # Upper-bound estimate of output count: 3 conditions x SNR levels per
    # usable speech file. The original if/else here had two identical
    # branches (dead duplication); collapsed. Files with unknown env codes,
    # which the main loop skips, are no longer counted, so the printed
    # progress denominator is more accurate. Missing noise files can still
    # make this an overestimate.
    total_files = 0
    for speech_file in speech_files:
        stem = os.path.splitext(os.path.basename(speech_file))[0]
        try:
            env_code, type_char, _ = parse_speech_stem(stem)
        except ValueError:
            continue  # unparseable names are also skipped in the main loop
        if type_char == "N" or env_code == "NEUTRAL" or env_code in ENV_TO_BIGCAT:
            total_files += 3 * len(snr_levels)
    processed = 0

    for speech_file in speech_files:
        speech_data, speech_sr = read_wav(speech_file)
        speech_name = os.path.splitext(os.path.basename(speech_file))[0]
        try:
            env_code, type_char, utt_id = parse_speech_stem(speech_name)
        except ValueError as e:  # parse_speech_stem only raises ValueError
            print(f"\nSkipping speech (unparseable filename): {speech_name}, reason: {e}")
            continue

        print(f"\nProcessing speech: {speech_name} (duration: {len(speech_data)/speech_sr:.2f} s)")

        selected_noise_envs = []
        is_neutral = (type_char == "N") or (env_code == "NEUTRAL")
        if is_neutral:
            # Neutral speech: one env from each of 3 distinct big categories.
            bigcats = random.sample(list(BIGCAT_TO_ENVS.keys()), 3)
            for bigcat in bigcats:
                selected_noise_envs.append(random.choice(BIGCAT_TO_ENVS[bigcat]))
        else:
            if env_code not in ENV_TO_BIGCAT:
                print(f" Skip (unknown text env code): {env_code}")
                continue

            # Condition 1 - Matched: same environment as the speech text.
            selected_noise_envs.append(env_code)

            # Condition 2 - Within-Mismatch: rotated env in the same category.
            within_env = WITHIN_ROTATION.get(env_code)
            if within_env is None:
                print(f" Warning: no Within rotation rule; skipping Within-Mismatch: {env_code}")
            else:
                selected_noise_envs.append(within_env)

            # Condition 3 - Cross-Mismatch: random env from the paired category.
            src_bigcat = ENV_TO_BIGCAT[env_code]
            dst_bigcat = CROSS_BIGCAT_MAP.get(src_bigcat)
            if dst_bigcat is None:
                print(f" Warning: no Cross big-category map; skipping Cross-Mismatch: {src_bigcat}")
            else:
                selected_noise_envs.append(random.choice(BIGCAT_TO_ENVS[dst_bigcat]))

        # All three conditions must be available; otherwise skip the file.
        if len(selected_noise_envs) != 3:
            print(f" Skip (could not build 3 conditions, got {len(selected_noise_envs)}): {speech_name}")
            continue

        for noise_env in selected_noise_envs:
            noise_env = noise_env.upper()
            noise_path = get_noise_path(noise_index, noise_env)
            if noise_path is None:
                print(f" Skip (noise file not found): {noise_env}.wav")
                continue

            noise_data, noise_sr = read_wav(noise_path)
            noise_name = os.path.splitext(os.path.basename(noise_path))[0].upper()

            # Resample the noise to the speech rate when they differ.
            if speech_sr != noise_sr:
                print(f" Warning: sample rate mismatch - speech: {speech_sr} Hz, noise ({noise_name}): {noise_sr} Hz")
                new_length = int(len(noise_data) * (speech_sr / noise_sr))
                noise_data = signal.resample(noise_data, new_length)

            for snr in snr_levels:
                noisy_speech = add_noise(speech_data, noise_data, snr)
                output_filename = f"{speech_name}_{noise_name}_{snr}.wav"
                write_wav(os.path.join(output_folder, output_filename), noisy_speech, speech_sr)
                processed += 1

        print(f" Progress: {processed}/{total_files}")
        print("-" * 40)

    print(f"\nDone. Generated {processed} files total.")
    print(f"Saved under: {output_folder}")
|
|
def check_files(folder):
    """Print sample rate and duration for up to five WAV files in `folder`."""
    wav_paths = glob.glob(os.path.join(folder, "*.wav"))
    print(f"\nFiles in folder {folder}:")
    for path in wav_paths[:5]:
        data, sr = read_wav(path)
        print(f" {os.path.basename(path)}: {sr} Hz, {len(data)/sr:.2f} s")
    if len(wav_paths) > 5:
        print(f" ... and {len(wav_paths)-5} more file(s)")
|
|
| |
def _main():
    """Script entry point: preview the input folders, then run the mixing."""
    speech_folder = "speech_16k"
    noise_folder = "noise"
    output_folder = "noisy_speech"
    snr_levels = [-10, -5, 0, 5, 10]

    print("Starting speech + noise mixing...")
    print(f"Speech folder: {speech_folder}")
    print(f"Noise folder: {noise_folder}")
    print(f"Output folder: {output_folder}")

    # Quick sanity listing of both input folders before the heavy work.
    check_files(speech_folder)
    check_files(noise_folder)

    process_files(speech_folder, noise_folder, output_folder, snr_levels)

    print("\nAll done.")


if __name__ == "__main__":
    _main()