"""
Task 4: Volume - Generate volume comparison questions

This task joins multiple audio sources with different volume levels
and asks questions about the loudest or softest sound.
"""

import csv
import random
import math
from pathlib import Path
from typing import Dict, List, Tuple, Optional

from pydub import AudioSegment  # audio container used by the loudness helpers below

import sys
sys.path.append(str(Path(__file__).parent.parent))

from utils import (
    AudioProcessor, ESC50Dataset, QuestionGenerator, LLMQuestionGenerator,
    setup_logger, set_random_seed, calculate_num_samples_for_task,
    generate_single_clip_duration, get_max_clip_num_to_be_joined,
    build_clip_sequence_with_silences, generate_sample_durations_for_task,
    get_lufs_loudness, normalize_to_lufs
)


class VolumeTaskGenerator:
    """Generator for volume comparison task dataset."""
    
    def __init__(self, config: Dict, logger):
        """
        Initialize volume task generator.
        
        Args:
            config: Configuration dictionary
            logger: Logger instance
        """
        self.config = config
        self.logger = logger
        self.task_config = config['tasks']['volume']
        
        # Initialize components
        self.dataset = ESC50Dataset(
            config['esc50']['metadata_path'],
            config['esc50']['audio_path'],
            config  # Pass config for class subset loading
        )
        self.audio_processor = AudioProcessor(
            crossfade_duration=config['audio']['crossfade_duration'],
            silence_duration=config['audio']['silence_duration'],
            with_silence=config['audio']['with_silence'],
            normalize=config['audio']['normalize'],
            normalize_target_dBFS=config['audio']['normalize_target_dBFS'],
            synthetic_silence_path=config['synthetic_silence']['path']
        )
        self.question_generator = QuestionGenerator(
            num_options=config['mcq']['num_options'],
            option_labels=config['mcq']['option_labels'],
            distractor_strategy=config['mcq']['distractor_strategy']
        )
        
        # Initialize LLM question generator
        self.llm_enabled = config.get('llm', {}).get('enabled', False)
        self.llm_generator = LLMQuestionGenerator(
            enabled=self.llm_enabled,
            template_questions=self.task_config
        )
        
        # Duration settings from config
        self.min_clip_duration = config['audio']['min_clip_duration']
        self.max_clip_duration = config['audio']['max_clip_duration']
        # Duration of individual source clips (ESC-50 default is 5s)
        self.source_clip_duration = config['audio'].get('source_clip_duration', 5.0)
        self.min_silence_ms = config['audio'].get('min_silence_duration', 100)
        self.max_extra_silence_per_gap_ms = config['audio'].get('max_extra_silence_per_gap', 500)
        self.crossfade_ms = config['audio'].get('crossfade_duration', 0)
        self.task_duration_hours = self.task_config['task_duration_size']
        
        # Volume task specific settings
        self.normalize_to_baseline = self.task_config.get('normalize_to_baseline', True)
        self.baseline_dBFS = self.task_config.get('baseline_dBFS', -20.0)
        self.use_same_clip_different_volumes = self.task_config.get('use_same_clip_different_volumes', False)
        self.repetitions_per_source = self.task_config.get('repetitions_per_source', [2, 3, 4])
        if isinstance(self.repetitions_per_source, int):
            self.repetitions_per_source = [self.repetitions_per_source]
        
        # Volume gap multipliers (similar to duration task)
        self.multiplier_max_loudness = self.task_config.get('multiplier_max_loudness', 1.5)
        self.multiplier_min_loudness = self.task_config.get('multiplier_min_loudness', 0.5)
        self.reject_if_gap_not_met = self.task_config.get('reject_if_gap_not_met', True)
        
        # LUFS vs dBFS loudness measurement option
        # LUFS (Loudness Units relative to Full Scale) measures PERCEIVED loudness
        # via K-weighting, which models the ear's frequency sensitivity; dBFS
        # measures raw RMS amplitude and does not. LUFS is therefore recommended
        # when comparing different sound types.
        self.use_lufs = self.task_config.get('use_lufs', True)
        self.baseline_lufs = self.task_config.get('baseline_lufs', -23.0)  # EBU R128 standard
        
        # Set up output paths
        self.output_base = Path(config['output']['base_path']) / 'volume'
        self.output_base.mkdir(parents=True, exist_ok=True)
        self.audio_output = self.output_base / 'audios'
        self.audio_output.mkdir(parents=True, exist_ok=True)
        
        # Balanced sampling pool for num_clips; kept for interface parity with
        # other tasks, but the volume task samples n_clips uniformly instead
        # (see generate_sample)
        self.clips_count_pool = []
    
    def _normalize_to_baseline(self, audio: AudioSegment) -> AudioSegment:
        """
        Normalize audio to the baseline loudness level.
        
        Uses LUFS (perceived loudness) if use_lufs=True, otherwise dBFS.
        This ensures all clips start from the same perceived loudness before
        applying volume adjustments.
        
        Args:
            audio: Input audio segment
            
        Returns:
            Normalized audio segment
        """
        if not self.normalize_to_baseline:
            return audio
        
        if self.use_lufs:
            # Use LUFS-based normalization (perceived loudness)
            normalized = normalize_to_lufs(audio, self.baseline_lufs)
            self.logger.debug(
                f"Normalized to baseline LUFS: {get_lufs_loudness(audio):.2f} -> {get_lufs_loudness(normalized):.2f} LUFS"
            )
            return normalized
        else:
            # Use dBFS normalization (RMS amplitude)
            change_in_dBFS = self.baseline_dBFS - audio.dBFS
            normalized = audio.apply_gain(change_in_dBFS)
            self.logger.debug(
                f"Normalized to baseline dBFS: {audio.dBFS:.2f} -> {normalized.dBFS:.2f} dBFS"
            )
            return normalized
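    # Worked example of the dBFS branch above (illustrative numbers): a clip
    # measuring -14.3 dBFS with baseline_dBFS = -20.0 receives a gain of
    # -20.0 - (-14.3) = -5.7 dB, bringing it to the baseline before any
    # per-clip volume adjustment is applied.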
    
    def _get_amplitude_loudness(self, audio: AudioSegment) -> float:
        """
        Get the loudness of an audio clip.
        
        Uses LUFS (perceived loudness) if use_lufs=True, otherwise dBFS.
        
        Args:
            audio: Input audio segment
            
        Returns:
            Loudness in LUFS or dBFS depending on configuration
        """
        if self.use_lufs:
            return get_lufs_loudness(audio)
        else:
            return audio.dBFS
    
    def _verify_loudness_gap(
        self,
        volume_levels: List[float],
        question_type: str
    ) -> Tuple[bool, int, Dict]:
        """
        Verify that loudness gap constraint is satisfied.
        
        For MAX_LOUDNESS: max_volume >= second_max × multiplier_max
        For MIN_LOUDNESS: min_volume <= second_min × multiplier_min
        
        Since we work with dB (logarithmic), the gap is in dB difference:
        - For max: max_dB - second_max_dB >= required_gap_dB
        - For min: second_min_dB - min_dB >= required_gap_dB
        
        The multiplier translates to dB: 1.5x linear = ~3.5dB, 2x = ~6dB
        
        Args:
            volume_levels: List of volume adjustments in dB
            question_type: "max_loudness" or "min_loudness"
            
        Returns:
            Tuple of (gap_satisfied, answer_idx, metadata)
        """
        
        sorted_levels = sorted(volume_levels, reverse=True)  # Highest first
        
        if question_type == "max_loudness":
            max_level = sorted_levels[0]
            second_max = sorted_levels[1] if len(sorted_levels) > 1 else sorted_levels[0]
            
            # Convert multiplier to dB difference
            # multiplier 1.5 means 1.5x louder in amplitude = 20*log10(1.5) ≈ 3.5 dB
            required_gap_dB = 20 * math.log10(self.multiplier_max_loudness)
            actual_gap_dB = max_level - second_max
            
            gap_satisfied = actual_gap_dB >= required_gap_dB
            answer_idx = volume_levels.index(max_level)
            
            metadata = {
                'max_level_dB': max_level,
                'second_max_dB': second_max,
                'required_gap_dB': required_gap_dB,
                'actual_gap_dB': actual_gap_dB,
                'multiplier': self.multiplier_max_loudness
            }
            
        else:  # min_loudness
            min_level = sorted_levels[-1]
            second_min = sorted_levels[-2] if len(sorted_levels) > 1 else sorted_levels[-1]
            
            # For min, we want min to be multiplier times softer
            # multiplier 0.5 means 0.5x amplitude = 20*log10(0.5) ≈ -6 dB
            # So second_min - min_level should be >= 6 dB
            required_gap_dB = abs(20 * math.log10(self.multiplier_min_loudness))
            actual_gap_dB = second_min - min_level
            
            gap_satisfied = actual_gap_dB >= required_gap_dB
            answer_idx = volume_levels.index(min_level)
            
            metadata = {
                'min_level_dB': min_level,
                'second_min_dB': second_min,
                'required_gap_dB': required_gap_dB,
                'actual_gap_dB': actual_gap_dB,
                'multiplier': self.multiplier_min_loudness
            }
        
        return gap_satisfied, answer_idx, metadata
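
    # Worked example (illustrative): with multiplier_max_loudness = 1.5 the
    # required gap is 20*log10(1.5) ≈ 3.52 dB, so volume levels [18, 6, -6]
    # satisfy max_loudness (actual gap 18 - 6 = 12 dB), while [18, 16, -6]
    # would fail (actual gap 2 dB < 3.52 dB).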
    
    def generate_volume_levels(self, n_clips: int, question_type: Optional[str] = None) -> List[int]:
        """
        Generate volume levels dynamically based on multiplier constraints.
        
        The levels are generated to ensure proper gap for the question type:
        - For max_loudness: the loudest is clearly distinguishable (gap = multiplier_max)
        - For min_loudness: the softest is clearly distinguishable (gap = multiplier_min)
        
        Args:
            n_clips: Number of clips
            question_type: "max_loudness" or "min_loudness" to ensure proper gap
            
        Returns:
            List of volume adjustments in dB (integers)
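
        Example (illustrative, using the defaults in the body below): for
        n_clips=3 and question_type="max_loudness" with
        multiplier_max_loudness=1.5, the required gap rounds up to 4 dB and
        is then raised to min_diff=12 dB, giving a shuffled permutation of
        [18, 6, -6].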
        """
        # Base spacing between adjacent volume levels (minimum audible difference)
        # 6 dB = 2x amplitude, 12 dB = 4x amplitude (clearly distinguishable)
        min_diff = 12  # 12 dB is a very noticeable difference (4x amplitude, roughly 2x perceived loudness)
        
        # Calculate required gap based on multiplier (round up to nearest int)
        if question_type == "max_loudness":
            required_gap = int(math.ceil(20 * math.log10(self.multiplier_max_loudness)))
        elif question_type == "min_loudness":
            required_gap = int(math.ceil(abs(20 * math.log10(self.multiplier_min_loudness))))
        else:
            required_gap = min_diff
        
        # Ensure gap is at least min_diff
        required_gap = max(required_gap, min_diff)
        
        if question_type == "max_loudness":
            # Generate levels where max has clear gap from others
            # Max level (answer) at a high value - MUCH louder
            max_level = 18  # +18 dB adjustment = ~8x baseline amplitude
            
            # Other levels should be at least required_gap below max
            # Spread them out with min_diff spacing
            other_levels = []
            current_level = max_level - required_gap
            for i in range(n_clips - 1):
                other_levels.append(current_level)
                current_level -= min_diff
            
            selected_levels = other_levels + [max_level]
            
        elif question_type == "min_loudness":
            # Generate levels where min has clear gap from others
            # Min level (answer) at a low value - MUCH quieter
            min_level = -24  # -24 dB adjustment = ~1/16 of baseline amplitude
            
            # Other levels should be at least required_gap above min
            # Spread them out with min_diff spacing
            other_levels = []
            current_level = min_level + required_gap
            for i in range(n_clips - 1):
                other_levels.append(current_level)
                current_level += min_diff
            
            selected_levels = [min_level] + other_levels
            
        else:
            # Default: evenly spaced levels centered around 0
            total_range = (n_clips - 1) * min_diff
            start_level = -total_range // 2
            selected_levels = [start_level + i * min_diff for i in range(n_clips)]
        
        # Shuffle to randomize order in the audio
        random.shuffle(selected_levels)
        
        return selected_levels
    
    def generate_sample(
        self,
        sample_id: int,
        target_question_type: Optional[str] = None,
        target_duration_seconds: Optional[float] = None
    ) -> Optional[Dict]:
        """
        Generate a single volume task sample.
        
        Pipeline:
        1. Pick dataset -> pick class -> pick audio clip
        2. NORMALIZE all clips to the baseline loudness, LUFS or dBFS per config (critical for controlled comparison)
        3. Apply different volume adjustments to each clip
        4. Concatenate clips with silences
        
        Optionally: use same clip with different volume levels if configured.
        
        Args:
            sample_id: Sample ID number
            target_question_type: Target question type for balanced distribution
            target_duration_seconds: Pre-generated target duration (from generate_sample_durations_for_task)
            
        Returns:
            Dictionary with sample metadata, or None if the sample is rejected
        """
        # Use pre-generated duration or generate one (backward compatibility)
        if target_duration_seconds is not None:
            clip_duration_seconds = target_duration_seconds
        else:
            clip_duration_seconds = generate_single_clip_duration(
                self.min_clip_duration,
                self.max_clip_duration
            )

        # Calculate how many clips we need using the new helper
        max_clips, remainder_seconds = get_max_clip_num_to_be_joined(
            clip_duration_seconds,
            self.source_clip_duration,
            self.min_silence_ms
        )
        
        max_clips_per_sample = self.task_config.get('max_clips_per_sample', 10)
        
        # Silence reduction strategy: subsample from [max(2, max_clips-3), min(max_clips, max_clips_per_sample)]
        # This ensures we use close to max_clips that fit, reducing excessive silence
        
        # Calculate valid range for this sample's duration
        min_clips_for_sample = max(2, max_clips - 3)  # At least 2, preferably max_clips-3
        max_clips_for_sample = min(max_clips, max_clips_per_sample, len(self.dataset.CATEGORIES))
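        # Illustrative numbers (assuming get_max_clip_num_to_be_joined packs
        # 5 s clips with 100 ms minimum silences): a 60 s target gives
        # max_clips = floor(60 / 5.1) = 11, so n_clips is drawn from [8, 10]
        # once capped by the default max_clips_per_sample of 10 and the
        # category count.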
        
        # Validate range
        if max_clips_for_sample < 2:
            raise ValueError(
                f"Sample {sample_id}: Cannot generate volume task - need at least 2 clips. "
                f"max_clips={max_clips}, max_clips_per_sample={max_clips_per_sample}, "
                f"duration={clip_duration_seconds:.1f}s. Increase min_clip_duration."
            )
        
        if min_clips_for_sample > max_clips_for_sample:
            raise ValueError(
                f"Sample {sample_id}: Invalid clip range - min_clips ({min_clips_for_sample}) > max_clips ({max_clips_for_sample}). "
                f"max_clips={max_clips}, max_clips_per_sample={max_clips_per_sample}, duration={clip_duration_seconds:.1f}s"
            )
        
        # Randomly select from valid range (NO balanced pool for volume task)
        n_clips = random.randint(min_clips_for_sample, max_clips_for_sample)
        n_clips = max(2, n_clips)  # Ensure at least 2 for volume comparison
        
        # Pre-select question type to determine answer position
        # Use target question type if provided, otherwise randomly select
        if target_question_type is not None:
            question_type = target_question_type
        else:
            question_type = random.choice(self.task_config['question_types'])
        
        # Generate volume levels and verify gap constraint
        max_attempts = 10
        gap_satisfied = False
        volume_levels = None
        gap_metadata = None
        
        for attempt in range(max_attempts):
            volume_levels = self.generate_volume_levels(n_clips, question_type)
            gap_satisfied, answer_idx, gap_metadata = self._verify_loudness_gap(
                volume_levels, question_type
            )
            
            if gap_satisfied:
                break
            
            self.logger.debug(
                f"Sample {sample_id} attempt {attempt+1}: gap not satisfied, "
                f"required={gap_metadata['required_gap_dB']:.1f}dB, "
                f"actual={gap_metadata['actual_gap_dB']:.1f}dB"
            )
        
        if not gap_satisfied and self.reject_if_gap_not_met:
            self.logger.warning(
                f"Sample {sample_id} rejected: loudness gap not satisfied after {max_attempts} attempts"
            )
            return None
        
        # Determine answer position based on question type
        if question_type == 'max_loudness':
            answer_idx = volume_levels.index(max(volume_levels))
        else:  # min_loudness
            answer_idx = volume_levels.index(min(volume_levels))
        
        # Select answer category from least-used categories
        answer_category = self.dataset.get_least_used_categories(1)[0]
        
        # Determine if using same clip with different volumes
        if self.use_same_clip_different_volumes:
            # Use ONE source clip repeated at different volume levels
            selected_categories = [answer_category] * n_clips
            # Track usage
            self.dataset.category_usage_counts[answer_category] += 1
            correct_category = answer_category
        else:
            # Use different source clips (original behavior)
            # Sample remaining categories, ensuring balanced distribution
            if n_clips <= len(self.dataset.CATEGORIES):
                other_categories = self.dataset.get_least_used_categories(
                    n_clips - 1,
                    exclude=[answer_category]
                )
            else:
                # Need more clips than unique categories
                other_categories = self.dataset.get_least_used_categories(
                    min(n_clips - 1, len(self.dataset.CATEGORIES) - 1),
                    exclude=[answer_category]
                )
                # Add random repetitions if needed
                while len(other_categories) < n_clips - 1:
                    other_categories.append(random.choice(self.dataset.CATEGORIES))
            
            # Arrange categories with answer at correct position
            selected_categories = []
            other_idx = 0
            for i in range(n_clips):
                if i == answer_idx:
                    selected_categories.append(answer_category)
                else:
                    selected_categories.append(other_categories[other_idx])
                    other_idx += 1
            
            # Track usage of answer category
            self.dataset.category_usage_counts[answer_category] += 1
            
            # Sanity check: verify answer_category actually landed at answer_idx
            if selected_categories[answer_idx] != answer_category:
                self.logger.error(f"Sample {sample_id}: Answer mismatch! Expected {answer_category} at index {answer_idx}, got {selected_categories[answer_idx]}")
                correct_category = selected_categories[answer_idx]
            else:
                correct_category = answer_category
        
        # Sample files and process audio
        audio_segments = []
        filenames_list = []
        original_loudness = []
        final_loudness = []
        
        if self.use_same_clip_different_volumes:
            # Load one file and repeat it with different volumes
            filename, filepath = self.dataset.sample_file_from_category(answer_category)
            base_audio = self.audio_processor.load_audio(filepath)
            original_loudness_val = self._get_amplitude_loudness(base_audio)
            
            # Normalize to baseline first
            base_audio_normalized = self._normalize_to_baseline(base_audio)
            
            for i in range(n_clips):
                # Apply volume adjustment to normalized audio
                audio_adjusted = self.audio_processor.adjust_volume(
                    base_audio_normalized, 
                    volume_levels[i]
                )
                audio_segments.append(audio_adjusted)
                filenames_list.append(filename)
                original_loudness.append(original_loudness_val)
                final_loudness.append(self._get_amplitude_loudness(audio_adjusted))
        else:
            # Use different files (original behavior but with normalization)
            for i, category in enumerate(selected_categories):
                filename, filepath = self.dataset.sample_file_from_category(category)
                audio = self.audio_processor.load_audio(filepath)
                
                # Record original loudness
                orig_loud = self._get_amplitude_loudness(audio)
                original_loudness.append(orig_loud)
                
                # STEP 1: Normalize to baseline dBFS
                audio_normalized = self._normalize_to_baseline(audio)
                
                # STEP 2: Apply volume adjustment (relative to baseline)
                audio_adjusted = self.audio_processor.adjust_volume(
                    audio_normalized, 
                    volume_levels[i]
                )
                
                audio_segments.append(audio_adjusted)
                filenames_list.append(filename)
                final_loudness.append(self._get_amplitude_loudness(audio_adjusted))
        
        # Build final audio with guaranteed silences between clips
        output_audio_path = self.audio_output / f"{sample_id}.wav"
        final_audio = build_clip_sequence_with_silences(
            audio_segments,
            clip_duration_seconds,
            min_silence_ms=self.min_silence_ms,
            max_extra_silence_per_gap_ms=self.max_extra_silence_per_gap_ms,
            crossfade_ms=self.crossfade_ms
        )
        
        # Save the audio
        final_audio.export(str(output_audio_path), format="wav")
        
        # Generate MCQ
        mcq_question = self.task_config['mcq_questions'][question_type]
        mcq_data = self.question_generator.generate_category_mcq(
            mcq_question,
            correct_category,
            selected_categories,
            self.dataset.CATEGORIES
        )
        
        # Generate open-text question
        open_text_question = self.task_config['open_text_questions'][question_type]
        open_text_data = self.question_generator.generate_category_open_text(
            open_text_question,
            correct_category
        )
        
        # Create category to volume mapping
        category_volumes = {
            selected_categories[i]: volume_levels[i]
            for i in range(n_clips)
        }
        
        # Create metadata
        metadata = {
            'id': sample_id,
            'audio_path': str(output_audio_path.relative_to(self.output_base.parent)),
            'n_clips': n_clips,
            'question_type': question_type,
            'audio_sequence': selected_categories,
            'volume_levels_db': volume_levels,
            'category_volumes': category_volumes,
            'correct_answer_category': correct_category,
            'correct_volume_db': volume_levels[answer_idx],
            'source_files': filenames_list,
            'use_same_clip': self.use_same_clip_different_volumes,
            'baseline_dBFS': self.baseline_dBFS if self.normalize_to_baseline else None,
            'original_loudness_dBFS': original_loudness,
            'final_loudness_dBFS': final_loudness,
            'gap_satisfied': gap_satisfied,
            'gap_metadata': gap_metadata,
            'mcq_question': mcq_data['question'],
            'mcq_options': mcq_data['options'],
            'mcq_correct_answer': mcq_data['correct_answer'],
            'open_text_question': open_text_data['question'],
            'open_text_answer': open_text_data['correct_answer']
        }
        
        self.logger.info(
            f"Generated volume sample {sample_id}: {question_type}, {n_clips} clips, "
            f"volumes={volume_levels}, gap_satisfied={gap_satisfied}, "
            f"gap={gap_metadata['actual_gap_dB']:.1f}dB (required={gap_metadata['required_gap_dB']:.1f}dB)"    
        )
        
        return metadata
    
    def generate_dataset(self) -> Tuple[Path, Path]:
        """
        Generate the complete volume task dataset.
        
        Uses generate_sample_durations_for_task() to pre-generate exact sample durations
        that sum to exactly the target task duration. This guarantees:
        - Exact coverage of target duration
        - No estimation errors from average-based calculation
        
        Returns:
            Tuple of (mcq_csv_path, open_text_csv_path)
        """
        # Generate sample durations upfront (guarantees exact total duration)
        sample_durations = generate_sample_durations_for_task(
            self.task_duration_hours,
            self.min_clip_duration,
            self.max_clip_duration
        )
        num_samples = len(sample_durations)
        
        self.logger.info(f"Generating {num_samples} volume task samples (target: {self.task_duration_hours}h, exact fill)...")
        
        # Create balanced question type distribution (NO clips balancing for volume task)
        question_types = self.task_config['question_types']
        balanced_question_types = []
        samples_per_type = num_samples // len(question_types)
        remainder = num_samples % len(question_types)
        
        for qtype in question_types:
            count = samples_per_type + (1 if remainder > 0 else 0)
            balanced_question_types.extend([qtype] * count)
            remainder = max(0, remainder - 1)
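
        # e.g. 11 samples over ['max_loudness', 'min_loudness'] yields 6 of
        # the first type and 5 of the second before shuffling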
        
        random.shuffle(balanced_question_types)
        from collections import Counter
        type_dist = Counter(balanced_question_types)
        self.logger.info(f"Balanced question type distribution: {dict(sorted(type_dist.items()))}")
        
        all_metadata = []
        
        for i, target_duration in enumerate(sample_durations):
            metadata = self.generate_sample(
                i,
                target_question_type=balanced_question_types[i],
                target_duration_seconds=target_duration
            )
            # generate_sample returns None when the loudness gap is rejected;
            # skip those so the CSV writers never see a None row
            if metadata is not None:
                all_metadata.append(metadata)

        # Save MCQ CSV
        mcq_csv_path = self.output_base / 'volume_mcq.csv'
        self._save_mcq_csv(all_metadata, mcq_csv_path)
        
        # Save open-text CSV
        open_text_csv_path = self.output_base / 'volume_open_text.csv'
        self._save_open_text_csv(all_metadata, open_text_csv_path)
        
        # Save metadata CSV
        metadata_csv_path = self.output_base / 'volume_metadata.csv'
        self._save_metadata_csv(all_metadata, metadata_csv_path)
        
        self.logger.info(f"Volume task dataset generation complete!")
        self.logger.info(f"  - MCQ CSV: {mcq_csv_path}")
        self.logger.info(f"  - Open-text CSV: {open_text_csv_path}")
        self.logger.info(f"  - Metadata CSV: {metadata_csv_path}")
        self.logger.info(f"  - Audio files: {self.audio_output}")
        
        return mcq_csv_path, open_text_csv_path
    
    def _save_mcq_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save MCQ format CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'question', 'id', 'audio_path',
                'optionA', 'optionB', 'optionC', 'optionD',
                'correct', 'question_type', 'audio_sequence',
                'category_volumes'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['mcq_question'],
                    meta['id'],
                    meta['audio_path'],
                    meta['mcq_options']['A'],
                    meta['mcq_options']['B'],
                    meta['mcq_options']['C'],
                    meta['mcq_options']['D'],
                    meta['mcq_correct_answer'],
                    meta['question_type'],
                    str(meta['audio_sequence']),
                    str(meta['category_volumes'])
                ])
    
    def _save_open_text_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save open-text format CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'question', 'id', 'audio_path', 'answer',
                'question_type', 'audio_sequence', 'category_volumes'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['open_text_question'],
                    meta['id'],
                    meta['audio_path'],
                    meta['open_text_answer'],
                    meta['question_type'],
                    str(meta['audio_sequence']),
                    str(meta['category_volumes'])
                ])
    
    def _save_metadata_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save detailed metadata CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'id', 'audio_path', 'n_clips', 'question_type',
                'audio_sequence', 'volume_levels_db', 'correct_answer_category',
                'correct_volume_db', 'source_files'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['id'],
                    meta['audio_path'],
                    meta['n_clips'],
                    meta['question_type'],
                    str(meta['audio_sequence']),
                    str(meta['volume_levels_db']),
                    meta['correct_answer_category'],
                    meta['correct_volume_db'],
                    str(meta['source_files'])
                ])


def main(config_path: Optional[str] = None):
    """Main entry point for volume task generation."""
    import yaml
    
    # Load configuration
    if config_path is None:
        config_path = Path(__file__).parent.parent / 'config.yaml'
    
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    
    # Set random seed
    set_random_seed(config['random_seed'])
    
    # Setup logger
    logger = setup_logger(
        'volume_task',
        log_file=str(Path(config['output']['base_path']) / config['logging']['log_file']),
        level=config['logging']['level'],
        console_output=config['logging']['console_output']
    )
    
    # Generate dataset
    generator = VolumeTaskGenerator(config, logger)
    generator.generate_dataset()


if __name__ == '__main__':
    main()