Wonder239 commited on
Commit
d1999e7
·
verified ·
1 Parent(s): 9dff11e

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +2 -0
  2. Code For Speech Generation/BSC/addNoise.py +288 -0
  3. Code For Speech Generation/BSC/edgeTTS.py +76 -0
  4. Code For Speech Generation/BSC/mp3_to_wav.py +68 -0
  5. Code For Speech Generation/README.md +23 -0
  6. Code For Speech Generation/SIC/SIC_audio_generation.py +225 -0
  7. Data/Audio/BSC/DKITCHEN_E01.wav +3 -0
  8. Data/Audio/BSC/DKITCHEN_E02.wav +3 -0
  9. Data/Audio/BSC/DKITCHEN_I01.wav +3 -0
  10. Data/Audio/BSC/DKITCHEN_I02.wav +3 -0
  11. Data/Audio/BSC/DLIVING_E01.wav +3 -0
  12. Data/Audio/BSC/DLIVING_E02.wav +3 -0
  13. Data/Audio/BSC/DLIVING_I01.wav +3 -0
  14. Data/Audio/BSC/DLIVING_I02.wav +3 -0
  15. Data/Audio/BSC/DWASHING_E01.wav +3 -0
  16. Data/Audio/BSC/DWASHING_E02.wav +3 -0
  17. Data/Audio/BSC/DWASHING_I01.wav +3 -0
  18. Data/Audio/BSC/DWASHING_I02.wav +3 -0
  19. Data/Audio/BSC/NEUTRAL_N01.wav +3 -0
  20. Data/Audio/BSC/NEUTRAL_N02.wav +3 -0
  21. Data/Audio/BSC/NEUTRAL_N03.wav +3 -0
  22. Data/Audio/BSC/NEUTRAL_N04.wav +3 -0
  23. Data/Audio/BSC/NEUTRAL_N05.wav +3 -0
  24. Data/Audio/BSC/NEUTRAL_N06.wav +3 -0
  25. Data/Audio/BSC/NEUTRAL_N07.wav +3 -0
  26. Data/Audio/BSC/NEUTRAL_N08.wav +3 -0
  27. Data/Audio/BSC/NEUTRAL_N09.wav +3 -0
  28. Data/Audio/BSC/NEUTRAL_N10.wav +3 -0
  29. Data/Audio/BSC/NEUTRAL_N11.wav +3 -0
  30. Data/Audio/BSC/NEUTRAL_N12.wav +3 -0
  31. Data/Audio/BSC/NFIELD_E01.wav +3 -0
  32. Data/Audio/BSC/NFIELD_E02.wav +3 -0
  33. Data/Audio/BSC/NFIELD_I01.wav +3 -0
  34. Data/Audio/BSC/NFIELD_I02.wav +3 -0
  35. Data/Audio/BSC/NPARK_E01.wav +3 -0
  36. Data/Audio/BSC/NPARK_E02.wav +3 -0
  37. Data/Audio/BSC/NPARK_I01.wav +3 -0
  38. Data/Audio/BSC/NPARK_I02.wav +3 -0
  39. Data/Audio/BSC/NRIVER_E01.wav +3 -0
  40. Data/Audio/BSC/NRIVER_E02.wav +3 -0
  41. Data/Audio/BSC/NRIVER_I01.wav +3 -0
  42. Data/Audio/BSC/NRIVER_I02.wav +3 -0
  43. Data/Audio/BSC/OHALLWAY_E01.wav +3 -0
  44. Data/Audio/BSC/OHALLWAY_E02.wav +3 -0
  45. Data/Audio/BSC/OHALLWAY_I01.wav +3 -0
  46. Data/Audio/BSC/OHALLWAY_I02.wav +3 -0
  47. Data/Audio/BSC/OMEETING_E01.wav +3 -0
  48. Data/Audio/BSC/OMEETING_E02.wav +3 -0
  49. Data/Audio/BSC/OMEETING_I01.wav +3 -0
  50. Data/Audio/BSC/OMEETING_I02.wav +3 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .DS_Store
2
+ Thumbs.db
Code For Speech Generation/BSC/addNoise.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import soundfile as sf
3
+ import os
4
+ import glob
5
+ from scipy import signal
6
+ import random
7
+
8
# Big noise categories -> their three sub-environment codes
# (stems of the noise WAV files used for mixing).
BIGCAT_TO_ENVS = {
    "Domestic": ["DWASHING", "DKITCHEN", "DLIVING"],
    "Nature": ["NFIELD", "NRIVER", "NPARK"],
    "Office": ["OOFFICE", "OHALLWAY", "OMEETING"],
    "Public": ["PSTATION", "PCAFETER", "PRESTO"],
    "Street": ["STRAFFIC", "SPSQUARE", "SCAFE"],
    "Transportation": ["TMETRO", "TBUS", "TCAR"],
}
16
+
17
# Reverse lookup: sub-environment code -> its big category name.
ENV_TO_BIGCAT = {env: bigcat for bigcat, envs in BIGCAT_TO_ENVS.items() for env in envs}
18
+
19
# Within each big category: rotate 3 sub-environment codes (Within-Mismatch).
# Each env maps to the "next" env of the same big category, forming a 3-cycle.
WITHIN_ROTATION = {
    # Domestic
    "DWASHING": "DKITCHEN",
    "DKITCHEN": "DLIVING",
    "DLIVING": "DWASHING",
    # Nature
    "NFIELD": "NRIVER",
    "NRIVER": "NPARK",
    "NPARK": "NFIELD",
    # Office
    "OOFFICE": "OHALLWAY",
    "OHALLWAY": "OMEETING",
    "OMEETING": "OOFFICE",
    # Public
    "PSTATION": "PCAFETER",
    "PCAFETER": "PRESTO",
    "PRESTO": "PSTATION",
    # Street
    "STRAFFIC": "SPSQUARE",
    "SPSQUARE": "SCAFE",
    "SCAFE": "STRAFFIC",
    # Transportation
    "TMETRO": "TBUS",
    "TBUS": "TCAR",
    "TCAR": "TMETRO",
}
46
+
47
# Cross big-category mapping (Cross-Mismatch): each big category is paired
# with one fixed, different big category; the concrete sub-env is chosen
# at random from the target category at mixing time.
CROSS_BIGCAT_MAP = {
    "Domestic": "Street",
    "Nature": "Transportation",
    "Office": "Nature",
    "Public": "Domestic",
    "Street": "Office",
    "Transportation": "Public",
}
56
+
57
+
58
def read_wav(file_path):
    """Load a WAV file and return a (samples, samplerate) tuple."""
    samples, samplerate = sf.read(file_path)
    return samples, samplerate
61
+
62
def write_wav(file_path, data, samplerate):
    """Persist *data* to *file_path* as a WAV at the given sample rate."""
    sf.write(file_path, data, samplerate)
65
+
66
def calculate_rms(samples):
    """Return the root-mean-square amplitude of *samples* (a numeric array).

    The parameter was renamed from ``signal``: that name shadowed the
    ``from scipy import signal`` module imported at file level, which would
    silently break any future use of ``signal.*`` inside this function.
    Callers pass the argument positionally, so they are unaffected.
    """
    return np.sqrt(np.mean(samples**2))
69
+
70
def add_noise(speech, noise, snr_db):
    """Mix *noise* into *speech* at the requested SNR (in dB).

    Short noise is tiled to cover the speech; long noise contributes a
    random segment. The mixture is rescaled to 95% full scale only if it
    would clip. Returns the noisy speech array.
    """
    n = len(speech)
    if len(noise) < n:
        # Noise shorter than speech: repeat it end-to-end, then trim.
        reps = int(np.ceil(n / len(noise)))
        noise = np.tile(noise, reps)[:n]
    else:
        # Noise at least as long: pick a random window of matching length.
        offset = random.randint(0, len(noise) - n)
        noise = noise[offset:offset + n]

    # Scale noise so that speech_rms / noise_rms equals the target SNR
    # (10^(dB/20) is the amplitude ratio). Epsilon guards silent noise.
    snr_linear = 10 ** (snr_db / 20)
    target_noise_rms = calculate_rms(speech) / snr_linear
    scaled_noise = noise * (target_noise_rms / (calculate_rms(noise) + 1e-10))

    mixture = speech + scaled_noise

    # Normalize into [-1, 1] only when the mix would clip.
    peak = np.max(np.abs(mixture))
    if peak > 1:
        mixture = mixture / peak * 0.95

    return mixture
101
+
102
def parse_speech_stem(stem: str):
    """
    Split a speech file stem into its components.

    Expected formats:
      - {ENV}_{E|I}{ID}, e.g. DKITCHEN_E01
      - NEUTRAL_N{ID},  e.g. NEUTRAL_N03

    Returns (env_code, type_char, rest): env_code and type_char uppercased,
    rest is the raw text after the first '_'. Raises ValueError on stems
    without an underscore or with nothing after it.
    """
    env_code, sep, rest = stem.partition("_")
    if not sep:
        raise ValueError(f"Cannot parse speech filename (missing '_'): {stem}")
    if not rest:
        raise ValueError(f"Cannot parse speech filename (empty after '_'): {stem}")
    return env_code.upper(), rest[0].upper(), rest
118
+
119
def build_noise_index(noise_files):
    """Map uppercased file stems (noise env codes) to their file paths."""
    return {
        os.path.splitext(os.path.basename(path))[0].upper(): path
        for path in noise_files
    }
126
+
127
def get_noise_path(noise_index, env_code: str):
    """Return the noise file registered for *env_code* (case-insensitive), or None."""
    return noise_index.get(env_code.upper())
131
+
132
def process_files(speech_folder, noise_folder, output_folder, snr_levels=(-10, -5, 0, 5, 10), seed=None):
    """Mix every speech file with 3 selected noise conditions at each SNR level.

    Explicit/Implicit utterances get Matched / Within-Mismatch /
    Cross-Mismatch backgrounds; Neutral utterances get 3 randomly chosen
    big-category backgrounds. Outputs are written to *output_folder* as
    ``{speech_stem}_{noise_stem}_{snr}.wav``.

    Args:
        speech_folder: folder containing speech WAV files.
        noise_folder: folder containing noise WAV files (stem == env code).
        output_folder: destination folder, created if missing.
        snr_levels: iterable of SNR values in dB. The default is now an
            immutable tuple (the previous mutable-list default is a classic
            Python pitfall); callers passing their own list are unaffected.
        seed: optional seed for `random`, for reproducible noise selection.
    """
    snr_levels = list(snr_levels)

    # Collect all speech files
    speech_files = sorted(glob.glob(os.path.join(speech_folder, "*.wav")))

    # Collect all noise files and index them by uppercased stem (env code)
    noise_files = sorted(glob.glob(os.path.join(noise_folder, "*.wav")))
    noise_index = build_noise_index(noise_files)

    if seed is not None:
        random.seed(seed)

    print(f"Found {len(speech_files)} speech files")
    print(f"Found {len(noise_files)} noise files")
    print(f"SNR levels (dB): {snr_levels}")
    print("=" * 60)

    # Create output folder
    os.makedirs(output_folder, exist_ok=True)

    # Count expected outputs: every parseable utterance yields 3 conditions
    # per SNR level regardless of type (the previous if/else had two
    # identical branches, collapsed here).
    total_files = 0
    for speech_file in speech_files:
        stem = os.path.splitext(os.path.basename(speech_file))[0]
        try:
            parse_speech_stem(stem)
        except Exception:
            # Unparseable stems are skipped in the main loop too
            continue
        total_files += 3 * len(snr_levels)
    processed = 0

    # Process each speech file
    for speech_file in speech_files:
        speech_data, speech_sr = read_wav(speech_file)
        speech_name = os.path.splitext(os.path.basename(speech_file))[0]
        try:
            env_code, type_char, utt_id = parse_speech_stem(speech_name)
        except Exception as e:
            print(f"\nSkipping speech (unparseable filename): {speech_name}, reason: {e}")
            continue

        print(f"\nProcessing speech: {speech_name} (duration: {len(speech_data)/speech_sr:.2f} s)")

        # Pick 3 noise envs per utterance (Matched / Within / Cross, or Neutral rules)
        selected_noise_envs = []

        is_neutral = (type_char == "N") or (env_code == "NEUTRAL")
        if is_neutral:
            # Neutral: 3 different big-category backgrounds (no matched condition)
            bigcats = random.sample(list(BIGCAT_TO_ENVS.keys()), 3)
            for bigcat in bigcats:
                selected_noise_envs.append(random.choice(BIGCAT_TO_ENVS[bigcat]))
        else:
            # Explicit / Implicit
            if env_code not in ENV_TO_BIGCAT:
                print(f" Skip (unknown text env code): {env_code}")
                continue

            # 1) Matched: background matches the environment named in the text
            selected_noise_envs.append(env_code)

            # 2) Within-Mismatch (rotate within same big category)
            within_env = WITHIN_ROTATION.get(env_code)
            if within_env is None:
                print(f" Warning: no Within rotation rule; skipping Within-Mismatch: {env_code}")
            else:
                selected_noise_envs.append(within_env)

            # 3) Cross-Mismatch (cross big-category map + random sub-env in target category)
            src_bigcat = ENV_TO_BIGCAT[env_code]
            dst_bigcat = CROSS_BIGCAT_MAP.get(src_bigcat)
            if dst_bigcat is None:
                print(f" Warning: no Cross big-category map; skipping Cross-Mismatch: {src_bigcat}")
            else:
                selected_noise_envs.append(random.choice(BIGCAT_TO_ENVS[dst_bigcat]))

        # Require exactly 3 conditions when rules are complete
        if len(selected_noise_envs) != 3:
            print(f" Skip (could not build 3 conditions, got {len(selected_noise_envs)}): {speech_name}")
            continue

        # Generate one output per selected noise env
        for noise_env in selected_noise_envs:
            noise_env = noise_env.upper()
            noise_path = get_noise_path(noise_index, noise_env)
            if noise_path is None:
                print(f" Skip (noise file not found): {noise_env}.wav")
                continue

            noise_data, noise_sr = read_wav(noise_path)
            noise_name = os.path.splitext(os.path.basename(noise_path))[0].upper()

            # Resample noise if the sample rates differ
            if speech_sr != noise_sr:
                print(f" Warning: sample rate mismatch - speech: {speech_sr} Hz, noise ({noise_name}): {noise_sr} Hz")
                resample_ratio = speech_sr / noise_sr
                new_length = int(len(noise_data) * resample_ratio)
                noise_data = signal.resample(noise_data, new_length)

            for snr in snr_levels:
                noisy_speech = add_noise(speech_data, noise_data, snr)
                # Output name: {speech_stem}_{noise_stem}_{snr}.wav
                # e.g. DKITCHEN_E01_DKITCHEN_-5.wav
                output_filename = f"{speech_name}_{noise_name}_{snr}.wav"
                output_path = os.path.join(output_folder, output_filename)
                write_wav(output_path, noisy_speech, speech_sr)
                processed += 1

        print(f" Progress: {processed}/{total_files}")
        print("-" * 40)

    print(f"\nDone. Generated {processed} files total.")
    print(f"Saved under: {output_folder}")
256
+
257
def check_files(folder):
    """Print sample rate and duration for up to five WAV files in *folder*."""
    wav_paths = glob.glob(os.path.join(folder, "*.wav"))
    print(f"\nFiles in folder {folder}:")
    for path in wav_paths[:5]:  # show first 5 only
        data, sr = read_wav(path)
        print(f" {os.path.basename(path)}: {sr} Hz, {len(data)/sr:.2f} s")
    remaining = len(wav_paths) - 5
    if remaining > 0:
        print(f" ... and {remaining} more file(s)")
266
+
267
# Main: mix speech with environmental noise end-to-end.
# Expects `speech_folder` to hold 16 kHz speech WAVs (see mp3_to_wav.py)
# and `noise_folder` to hold one WAV per environment code.
if __name__ == "__main__":
    # Folder paths
    speech_folder = "speech_16k"  # speech WAVs
    noise_folder = "noise"  # noise WAVs
    output_folder = "noisy_speech"  # output

    # SNR levels (dB)
    snr_levels = [-10, -5, 0, 5, 10]

    print("Starting speech + noise mixing...")
    print(f"Speech folder: {speech_folder}")
    print(f"Noise folder: {noise_folder}")
    print(f"Output folder: {output_folder}")

    # Quick peek at inputs
    check_files(speech_folder)
    check_files(noise_folder)

    process_files(speech_folder, noise_folder, output_folder, snr_levels)

    print("\nAll done.")
Code For Speech Generation/BSC/edgeTTS.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import edge_tts
3
+ import pandas as pd
4
+ import os
5
+ from tqdm import tqdm # progress bar
6
+
7
# ================= Configuration =================
# INPUT_FILE: path to an Excel sheet with 'code' and 'sentence' columns.
INPUT_FILE = ""
# OUTPUT_DIR: folder that receives one MP3 per spreadsheet row.
OUTPUT_DIR = ""
# VOICE: Edge TTS voice name used for every clip.
VOICE = "en-US-GuyNeural"
# ===========================================
12
+
13
async def generate_speech(code, text, output_path):
    """Synthesize one clip of *text* to *output_path*.

    Returns True on success, False on any failure (the error is reported
    but never raised, so one bad row cannot stop the batch).
    """
    try:
        tts = edge_tts.Communicate(text, VOICE)
        await tts.save(output_path)
        # tqdm.write instead of print keeps the progress bar layout intact
        tqdm.write(f"[OK] Saved {code}.mp3")
        return True
    except Exception as e:
        tqdm.write(f"[FAIL] {code}.mp3: {e}")
        return False
26
+
27
async def amain():
    """Batch driver: read the code/sentence spreadsheet and synthesize one
    MP3 per row into OUTPUT_DIR, with a tqdm progress bar."""
    # 1. Ensure output directory exists
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
        tqdm.write(f"Created output directory: {OUTPUT_DIR}")

    # 2. Load Excel; abort (return) on any read problem rather than raising,
    # so the script exits with a readable message.
    try:
        df = pd.read_excel(INPUT_FILE)
        # Strip column names (guards against stray whitespace in headers)
        df.columns = df.columns.str.strip()
        if 'code' not in df.columns or 'sentence' not in df.columns:
            tqdm.write("Error: Excel must contain 'code' and 'sentence' columns")
            return
    except FileNotFoundError:
        tqdm.write(f"Error: file not found: {INPUT_FILE}")
        return
    except Exception as e:
        tqdm.write(f"Error reading Excel: {e}")
        return

    total_count = len(df)
    tqdm.write(f"Starting: {total_count} row(s)...")

    # 3. Iterate rows with tqdm
    # desc: label on the left of the bar
    # unit: bar unit
    # total: row count for percentage
    for index, row in tqdm(df.iterrows(), total=total_count, desc="Progress", unit="row"):
        code = str(row['code']).strip()
        sentence = str(row['sentence']).strip()

        # Skip empty sentences
        if not sentence:
            tqdm.write(f"[SKIP] row {index}: empty sentence")
            continue

        # Sanitize code for use in filenames (drop characters illegal on Windows)
        safe_code = "".join([c for c in code if c not in r'\/:*?"<>|'])

        # Output path
        output_file = os.path.join(OUTPUT_DIR, f"{safe_code}.mp3")

        # Synthesize (failures are logged inside generate_speech, not raised)
        await generate_speech(safe_code, sentence, output_file)

    tqdm.write("All tasks finished.")
74
+
75
# Entry point: run the async batch job to completion.
if __name__ == "__main__":
    asyncio.run(amain())
Code For Speech Generation/BSC/mp3_to_wav.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydub import AudioSegment
2
+ import os
3
+ import glob
4
+
5
def mp3_to_wav(mp3_path, output_folder=None, target_sample_rate=16000):
    """Convert one MP3 file to WAV, resampling to *target_sample_rate* Hz.

    The WAV is written into *output_folder* when given, otherwise next to
    the source file. Returns the path of the written WAV.
    """
    if output_folder:
        stem = os.path.splitext(os.path.basename(mp3_path))[0]
        wav_path = os.path.join(output_folder, stem + '.wav')
    else:
        wav_path = os.path.splitext(mp3_path)[0] + '.wav'

    # Decode the MP3
    audio = AudioSegment.from_mp3(mp3_path)
    print(f"Original sample rate: {audio.frame_rate} Hz")

    # Resample only when the source rate differs from the target
    if audio.frame_rate != target_sample_rate:
        audio = audio.set_frame_rate(target_sample_rate)
        print(f"Resampled to: {target_sample_rate} Hz")

    # Write out as WAV
    audio.export(wav_path, format="wav")

    print(f"Conversion complete: {mp3_path} -> {wav_path}")
    return wav_path
29
+
30
def batch_convert(input_folder, output_folder=None, target_sample_rate=16000):
    """Convert every MP3 in *input_folder* to WAV at *target_sample_rate* Hz.

    Args:
        input_folder: folder scanned (non-recursively) for .mp3/.MP3 files.
        output_folder: optional destination folder, created if missing;
            otherwise WAVs are written next to their sources.
        target_sample_rate: sample rate for the output WAVs.
    """
    mp3_files = glob.glob(os.path.join(input_folder, "*.mp3"))
    mp3_files.extend(glob.glob(os.path.join(input_folder, "*.MP3")))
    # De-duplicate: on case-insensitive filesystems (macOS, Windows) both
    # patterns match the same files, which previously converted each file twice.
    mp3_files = sorted(set(mp3_files))

    if not mp3_files:
        print(f"No MP3 files found in folder: {input_folder}")
        return

    if output_folder:
        os.makedirs(output_folder, exist_ok=True)
        print(f"Output folder: {output_folder}")

    print(f"Found {len(mp3_files)} MP3 file(s), target sample rate: {target_sample_rate} Hz")
    print("=" * 50)
    success = 0
    failed = 0

    for mp3_file in mp3_files:
        try:
            mp3_to_wav(mp3_file, output_folder, target_sample_rate)
            success += 1
            print("-" * 50)
        except Exception as e:
            # Best-effort batch: report and continue with the next file
            print(f"Conversion failed {mp3_file}: {e}")
            failed += 1

    print(f"Batch complete. Succeeded: {success}, failed: {failed}")
59
+
60
# Entry point: convert ./speech/*.mp3 into 16 kHz WAVs under ./speech_16k.
if __name__ == "__main__":
    input_folder = "speech"  # Input folder
    output_folder = "speech_16k"  # Output folder for 16 kHz audio

    # Fail with a helpful message (including cwd) if the input folder is absent.
    if os.path.exists(input_folder):
        batch_convert(input_folder, output_folder, target_sample_rate=16000)
    else:
        print(f"Folder does not exist: {input_folder}")
        print(f"Current directory: {os.getcwd()}")
Code For Speech Generation/README.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Speech generation code (DEAF)
3
+
4
+ Scripts used to build the **BSC** and **SIC** speech datasets for [DEAF: A Benchmark for Diagnostic Evaluation of Acoustic Faithfulness in Audio Language Models](https://arxiv.org/abs/2603.18048).
5
+
6
+ ## BSC pipeline
7
+
8
+ 1. **`BSC/edgeTTS.py`** — Synthesize speech from text with Edge TTS.
9
+ 2. **`BSC/mp3_to_wav.py`** — Convert synthesized speech and background audio (e.g. MP3) to **WAV** at **16 kHz**.
10
+ 3. **`BSC/addNoise.py`** — Mix speech with background noise to produce the final **BSC** samples.
11
+
12
+ Run these steps in order from the repository root or adjust paths to match your local layout.
13
+
14
+ ## SIC pipeline
15
+
16
+ - **`SIC/SIC_audio_generation.py`** — Generate speech from text for the **SIC** subset (single script end-to-end for this track). Speech synthesis uses the **ElevenLabs Python SDK**.
17
+
18
+ **Reference (tooling):** ElevenLabs. 2024. *ElevenLabs Python SDK.* [https://github.com/elevenlabs/elevenlabs-python](https://github.com/elevenlabs/elevenlabs-python)
19
+
20
+ ## Requirements
21
+
22
+ Install dependencies used by the scripts you run (for example `edge-tts`, `pydub`, `soundfile`, `numpy`, `scipy`, **`elevenlabs`** for the SIC pipeline, and any others imported in each file). Pin versions in your own `requirements.txt` if you publish this folder as a standalone project.
23
+
Code For Speech Generation/SIC/SIC_audio_generation.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import wave
4
+ from dataclasses import dataclass
5
+
6
+ from dotenv import load_dotenv
7
+ from elevenlabs.client import ElevenLabs
8
+ from openpyxl import load_workbook
9
+
10
+ load_dotenv()
11
+
12
+
13
+ ELEVENLABS_API_KEY = " " # Enter your ElevenLabs API key here or set it in the .env file as ELEVENLABS_API_KEY
14
+ SIC_XLSX_PATH = " " # Enter the path to your SIC_texts.xlsx file here
15
+ OUTPUT_DIR = "SIC_clips" # Output directory for generated clips
16
+ MODEL_ID = "eleven_multilingual_v2"
17
+ OUTPUT_FORMAT = "pcm_16000"
18
+ SAMPLE_RATE = 16000
19
+ N_CHANNELS = 1
20
+ SAMPLE_WIDTH_BYTES = 2 # 16-bit PCM
21
+
22
+
23
+ VOICE_IDS = {
24
+ "elderly_male": "Av4Fi2idMFuA8kTbVZgv",
25
+ "young_male": "1wzJ0Fr9SDexsF2IsKU4",
26
+ "elderly_female": "0rEo3eAjssGDUCXHYENf",
27
+ "young_female": "aFueGIISJUmscc05ZNfD",
28
+ }
29
+
30
+ # For single-dimension contrasts, to control only that dimension,
31
+ # the other dimension is fixed by default:
32
+ # - Gender dimension: fix age to "young"
33
+ # - Age dimension: fix gender to "male"
34
+ GDR_FIXED_AGE = "young" # "young" or "elderly"
35
+ AGE_FIXED_GENDER = "male" # "male" or "female"
36
+
37
+
38
@dataclass(frozen=True)
class SicCode:
    """Parsed SIC clip code, e.g. 'GDR_EX_F_01' -> dim/typ/ident/nn."""

    dim: str  # GDR / AGE / CMB / NEU
    typ: str  # EX / IM / NT
    ident: str  # F/M/EL/YG/EF/EM/YF/YM/NA
    nn: str  # 01,02,...

    @property
    def stem(self) -> str:
        """Canonical file stem: '{dim}_{typ}_{ident}_{nn}'."""
        return f"{self.dim}_{self.typ}_{self.ident}_{self.nn}"
48
+
49
+
50
# Full-match pattern for SIC codes: dimension, type, 1-2 capital identity
# letters, and a 2-digit index, e.g. GDR_EX_F_01 or CMB_IM_YF_12.
CODE_RE = re.compile(r"^(GDR|AGE|CMB|NEU)_(EX|IM|NT)_([A-Z]{1,2})_(\d{2})$")
51
+
52
+
53
def parse_code(code: str) -> SicCode:
    """Normalize *code* (strip + uppercase) and parse it into a SicCode.

    Raises ValueError when the code does not match CODE_RE.
    """
    code = str(code).strip().upper()
    match = CODE_RE.match(code)
    if match is None:
        raise ValueError(f"Invalid code format: {code}")
    dim, typ, ident, nn = match.groups()
    return SicCode(dim=dim, typ=typ, ident=ident, nn=nn)
59
+
60
+
61
def voice_key_from_gender_age(gender: str, age: str) -> str:
    """Build the VOICE_IDS key '{age}_{gender}' after validating both parts.

    Raises ValueError for anything other than male/female and young/elderly.
    """
    gender, age = gender.lower(), age.lower()
    if gender not in {"male", "female"}:
        raise ValueError(f"Unknown gender: {gender}")
    if age not in {"young", "elderly"}:
        raise ValueError(f"Unknown age: {age}")
    return f"{age}_{gender}"
69
+
70
+
71
def pick_voices_for_code(sc: SicCode) -> list[str]:
    """
    Return the list of `voice_key`s to generate (each key must exist in `VOICE_IDS`).

    - GDR: 2 clips (matched / mismatched), age fixed to GDR_FIXED_AGE
    - AGE: 2 clips (matched / mismatched), gender fixed to AGE_FIXED_GENDER
    - CMB: 4 clips (both matched / gender mismatched only / age mismatched only / both mismatched)
    - NEU or NT: 4 clips (young_male, young_female, elderly_male, elderly_female)
    """
    # Neutral (control group): all four voices, no matched/mismatched notion
    if sc.dim == "NEU" or sc.typ == "NT":
        return ["young_male", "young_female", "elderly_male", "elderly_female"]

    flip_gender = {"female": "male", "male": "female"}
    flip_age = {"elderly": "young", "young": "elderly"}

    if sc.dim == "GDR":
        if sc.ident not in ("F", "M"):
            raise ValueError(f"GDR ident must be F/M, got: {sc.ident}")
        gender = "female" if sc.ident == "F" else "male"
        return [
            voice_key_from_gender_age(gender, GDR_FIXED_AGE),               # matched
            voice_key_from_gender_age(flip_gender[gender], GDR_FIXED_AGE),  # mismatched
        ]

    if sc.dim == "AGE":
        if sc.ident not in ("EL", "YG"):
            raise ValueError(f"AGE ident must be EL/YG, got: {sc.ident}")
        age = "elderly" if sc.ident == "EL" else "young"
        return [
            voice_key_from_gender_age(AGE_FIXED_GENDER, age),            # matched
            voice_key_from_gender_age(AGE_FIXED_GENDER, flip_age[age]),  # mismatched
        ]

    if sc.dim == "CMB":
        if sc.ident not in ("EF", "EM", "YF", "YM"):
            raise ValueError(f"CMB ident must be EF/EM/YF/YM, got: {sc.ident}")
        age = "elderly" if sc.ident.startswith("E") else "young"
        gender = "female" if sc.ident.endswith("F") else "male"
        return [
            voice_key_from_gender_age(gender, age),                         # both matched
            voice_key_from_gender_age(flip_gender[gender], age),            # gender mismatched only
            voice_key_from_gender_age(gender, flip_age[age]),               # age mismatched only
            voice_key_from_gender_age(flip_gender[gender], flip_age[age]),  # both mismatched
        ]

    raise ValueError(f"Unknown dim: {sc.dim}")
125
+
126
+
127
def pcm16_to_wav_bytes(pcm_bytes: bytes) -> bytes:
    """Wrap raw ElevenLabs `pcm_16000` (16-bit mono) samples in a WAV container.

    Builds the WAV entirely in memory with io.BytesIO: `wave.open` accepts
    any seekable file-like object, so the previous temporary-file round trip
    (disk I/O plus try/finally cleanup that could leak files on a crash)
    is unnecessary.
    """
    import io

    buf = io.BytesIO()
    with wave.open(buf, "wb") as wf:
        wf.setnchannels(N_CHANNELS)
        wf.setsampwidth(SAMPLE_WIDTH_BYTES)
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(pcm_bytes)
    return buf.getvalue()
148
+
149
+
150
def generate_one(client: ElevenLabs, text: str, voice_key: str, out_path: str):
    """Synthesize *text* with the voice behind *voice_key* and write a WAV to *out_path*.

    Raises ValueError when *voice_key* has no entry in VOICE_IDS.
    """
    voice_id = VOICE_IDS.get(voice_key)
    if not voice_id:
        raise ValueError(f"voice_key not configured in VOICE_IDS: {voice_key}")

    # The SDK yields raw PCM chunks for pcm_* output formats
    chunks = client.text_to_speech.convert(
        text=text,
        voice_id=voice_id,
        model_id=MODEL_ID,
        output_format=OUTPUT_FORMAT,
    )
    raw_pcm = b"".join(chunks)

    with open(out_path, "wb") as fh:
        fh.write(pcm16_to_wav_bytes(raw_pcm))
166
+
167
+
168
def load_sic_rows(xlsx_path: str) -> list[tuple[str, str]]:
    """Read (code, sentence) pairs from the first two columns of the active sheet.

    Rows that are too short, contain blank cells, or look like the header
    (first cell equal to 'code', case-insensitive) are skipped.
    """
    worksheet = load_workbook(xlsx_path).active
    rows: list[tuple[str, str]] = []
    for record in worksheet.iter_rows(min_row=1, values_only=True):
        if not record or len(record) < 2:
            continue
        code, sentence = record[0], record[1]
        if code is None or sentence is None:
            continue
        code_text = str(code).strip()
        sentence_text = str(sentence).strip()
        if not code_text or code_text.lower() == "code":
            continue
        rows.append((code_text, sentence_text))
    return rows
184
+
185
+
186
def main():
    """Generate every SIC clip listed in the spreadsheet into OUTPUT_DIR."""
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    client = ElevenLabs(api_key=ELEVENLABS_API_KEY)

    rows = load_sic_rows(SIC_XLSX_PATH)
    print(f"Loaded {len(rows)} rows")

    # First pass: count expected clips so progress can be shown as a
    # percentage. Rows with malformed codes are ignored here (and are
    # reported as [skip] in the generation loop below).
    total_clips = 0
    for code_raw, _ in rows:
        try:
            sc_tmp = parse_code(code_raw)
            total_clips += len(pick_voices_for_code(sc_tmp))
        except Exception:
            continue

    ok = 0
    fail = 0
    for code_raw, sentence in rows:
        try:
            sc = parse_code(code_raw)
            voice_keys = pick_voices_for_code(sc)
            for vk in voice_keys:
                out_name = f"{sc.stem}__{vk}.wav"
                out_path = os.path.join(OUTPUT_DIR, out_name)
                generate_one(client, sentence, vk, out_path)
                ok += 1
                if total_clips:
                    progress = ok + fail
                    pct = progress * 100.0 / total_clips
                    print(f"[progress] {progress}/{total_clips} ({pct:.1f}%) - current: {out_name}")
        except Exception as e:
            # A failure anywhere in a row (bad code, API error, file error)
            # skips the rest of that row but never aborts the whole run.
            fail += 1
            print(f"[skip] {code_raw}: {e}")

    print(f"Done: generated {ok} clips; skipped/failed {fail} rows. Output directory: {OUTPUT_DIR}")
222
+
223
+
224
# Entry point: run the full spreadsheet -> WAV generation batch.
if __name__ == "__main__":
    main()
Data/Audio/BSC/DKITCHEN_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dac697aacfff0491148e8c56e43d48b172c566f83011522aac8aa9ffff9d99fa
3
+ size 172844
Data/Audio/BSC/DKITCHEN_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44a5a10bb1166db29585560ddeb6a331b87da553364998ee0900ccfdb03bc954
3
+ size 211244
Data/Audio/BSC/DKITCHEN_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a57d1390c6d3e34d8a5c24658fd4a89e525130f591bf05fddab7be05081480f
3
+ size 146732
Data/Audio/BSC/DKITCHEN_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f13412e0368a6a489dcf46df34e21446f9d2f5dd19bec7258ba24a9f84f4a86c
3
+ size 302636
Data/Audio/BSC/DLIVING_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce62eb015d8a0109bd4b34f01b3420a0c4a653349512a4cb74a126ef9c78e79b
3
+ size 159788
Data/Audio/BSC/DLIVING_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ede90ef9878a71000c8160e7372051e411d0fa4704d7a5ae8dce3fcf58583c3f
3
+ size 215084
Data/Audio/BSC/DLIVING_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6aab865e3213c62cd95331f1c710e35bdb09f8d461b35928f404edf2abc3c02c
3
+ size 192044
Data/Audio/BSC/DLIVING_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2779f383758421fbd43df613e9464e359fb960274744ae06c66b63dd146bd7b0
3
+ size 195116
Data/Audio/BSC/DWASHING_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0d365ccdfb2ef2613917e4b9ae19d9f15be461d334ff8c8b17f277644879117
3
+ size 233516
Data/Audio/BSC/DWASHING_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25e7d4c00430d35a796f42add63d5de1dff733e963674c9c2870b4defd4b5ba8
3
+ size 191276
Data/Audio/BSC/DWASHING_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca582c436df4479b1ecf8fa4a965c27a15152963207cebb52346018b2fe08e0f
3
+ size 248108
Data/Audio/BSC/DWASHING_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a23c3c0f98ca2c6a3c33fc8a8c4781288de84533e299f10db0f4b973e477992
3
+ size 214316
Data/Audio/BSC/NEUTRAL_N01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90bfb10516c4631c352accca6b12f0d5abbeaab662857c497c33f18a9f468342
3
+ size 107564
Data/Audio/BSC/NEUTRAL_N02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdbf0cbd5a69f2c5332238837be93b92a18f1c33b619e0f8f4df9c079e30a2b6
3
+ size 131372
Data/Audio/BSC/NEUTRAL_N03.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a61c52691d8bc8e4ad97c18c69b53c18ad744165e68f9ad7494901d7ee810969
3
+ size 119084
Data/Audio/BSC/NEUTRAL_N04.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3b1810e4711b68b4e7a53d60440221d42dc780dc065a42fd44e13edd390f401
3
+ size 124460
Data/Audio/BSC/NEUTRAL_N05.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02149d4b784806abc9e12a98ec2984c977d36d36ef67e879eb493e501d7b025d
3
+ size 119084
Data/Audio/BSC/NEUTRAL_N06.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38d1521c94bd6dda708930692c15992ae4f90866433881311166b35051a55394
3
+ size 140588
Data/Audio/BSC/NEUTRAL_N07.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7591cacecd6f87dfd9db2142e70f2c0b8e763619035129bfe39f6d1479e88239
3
+ size 132140
Data/Audio/BSC/NEUTRAL_N08.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b6cac4d2a4e10c729fabec8f23f7188bc9128c7ee6d4b6ab7248855e2bf44cd
3
+ size 156716
Data/Audio/BSC/NEUTRAL_N09.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:165d274215b43958b27d6aee1bf4b8e499776f116ef0a20f4e56cd1914717688
3
+ size 103724
Data/Audio/BSC/NEUTRAL_N10.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d93003adff76c18dce60cee6bd0dfb485743105662e8a1e348d3947e2ca26763
3
+ size 129068
Data/Audio/BSC/NEUTRAL_N11.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36cbedc29a5735df930c0dfc27c8b0a529fbb251f82ce105dde3622fd77a484e
3
+ size 114476
Data/Audio/BSC/NEUTRAL_N12.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a099eda3008adaddba59f22ffcf9377edfdbd45e814fdb6d6ea561cc87240ae
3
+ size 129836
Data/Audio/BSC/NFIELD_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9eb927ba20e6958e17bee07cc9c5a9ec3e45872961351267d18a12ac3844c982
3
+ size 143660
Data/Audio/BSC/NFIELD_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a42ae48f343208c5850a656af1a5bf747e011b3cffa1328f6f28a8423bf745e
3
+ size 258092
Data/Audio/BSC/NFIELD_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d395a0f5ad6d70a53a7fa8ab7b698070c5f4b52908ed73d1ca9e2f819decc759
3
+ size 249644
Data/Audio/BSC/NFIELD_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8ac26774e61e90badcab0bdae47075935af9d0b3a60548acf2878d9cacaeb17
3
+ size 247340
Data/Audio/BSC/NPARK_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea7f61c621f7a4207920d0578f57d9a188c784af1616cff0ff96796f84d59664
3
+ size 258860
Data/Audio/BSC/NPARK_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f47253bbc7ae6c864b531a00a9a94390901b5016c1fae921ea8d1c3c9d04aee
3
+ size 284204
Data/Audio/BSC/NPARK_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f64f7d8ae5c6d59b69a7ac1b495ead8b487e63529057736f1045ecc5c1cc1fff
3
+ size 188972
Data/Audio/BSC/NPARK_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69f18734a66434cb93440101c299e2bb41cf2945369f9542d1df0a5520ee8073
3
+ size 234284
Data/Audio/BSC/NRIVER_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d57e94e4b818f8ba03295ec9919116d79a2412b7e5dcf74963a3c90087574b0
3
+ size 183596
Data/Audio/BSC/NRIVER_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:077698ec1903e2006359305488105b0ef84e4ae03e4a113f84aebd0cd6fff757
3
+ size 174380
Data/Audio/BSC/NRIVER_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:313d3c64b55ddb40fc7f606f823bb788be7bf1d0e8212d07de3eb01972d6e3a9
3
+ size 178220
Data/Audio/BSC/NRIVER_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18cdfba2b3deff930e4883d68f368d30269570225fc59531d7ffd9a20aa1f9cc
3
+ size 196652
Data/Audio/BSC/OHALLWAY_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f6b1a8c2992ed6f547687e6d9c72fc661b0dd916465e09adea7b35b0e2b7140
3
+ size 181292
Data/Audio/BSC/OHALLWAY_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1097d4e65461e48aee0e6b6a5fef029b3a35c8d8a34ed251f964500515555365
3
+ size 155948
Data/Audio/BSC/OHALLWAY_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3742119bf6fcecf10105f37558a7d095f630b6af180d1362bef58ef46d534d4
3
+ size 176684
Data/Audio/BSC/OHALLWAY_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abffdb050a09256b81f7c81c3a6704d79924b7dc58a06febe5253944530ae9ff
3
+ size 188972
Data/Audio/BSC/OMEETING_E01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1ffd0bea9e8d635cc5e390e26aa70f372abdf72f646350fd93a5f82473fe253
3
+ size 225068
Data/Audio/BSC/OMEETING_E02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f6dbbfb38cc3b3dd06666b29f2c1b01de0185e9ba2d2d5408955d7ba5ad073e
3
+ size 202796
Data/Audio/BSC/OMEETING_I01.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9745fc0ea81251e46de24533aaaf8c47bd471950bc9c64387a75adac9b6cd1ff
3
+ size 220460
Data/Audio/BSC/OMEETING_I02.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58776c9835602d72ba6e97465335874876319a882271a03f81f529059503a741
3
+ size 177452