"""
Task 3: Order - Generate temporal ordering questions

This task joins multiple audio sources and asks questions about their temporal order
(first, last, what comes after, what comes before).
"""

import csv
import random
from collections import Counter
from pathlib import Path
from typing import Dict, List, Optional

import sys
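# Make the directory above this file's folder importable so that the
# `from utils import ...` below resolves when the script is run directly.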
sys.path.append(str(Path(__file__).parent.parent))

from utils import (
    AudioProcessor, ESC50Dataset, QuestionGenerator, LLMQuestionGenerator,
    setup_logger, set_random_seed, calculate_num_samples_for_task,
    generate_single_clip_duration, get_max_clip_num_to_be_joined,
    build_clip_sequence_with_silences, generate_sample_durations_for_task
)


class OrderTaskGenerator:
    """Generator for temporal ordering task dataset."""
    
    def __init__(self, config: Dict, logger):
        """
        Initialize order task generator.
        
        Args:
            config: Configuration dictionary
            logger: Logger instance
        """
        self.config = config
        self.logger = logger
        self.task_config = config['tasks']['order']
        
        # Initialize components
        self.dataset = ESC50Dataset(
            config['esc50']['metadata_path'],
            config['esc50']['audio_path'],
            config  # Pass config for class subset loading
        )
        self.audio_processor = AudioProcessor(
            crossfade_duration=config['audio']['crossfade_duration'],
            silence_duration=config['audio']['silence_duration'],
            with_silence=config['audio']['with_silence'],
            normalize=config['audio']['normalize'],
            normalize_target_dBFS=config['audio']['normalize_target_dBFS'],
            synthetic_silence_path=config['synthetic_silence']['path']
        )
        self.question_generator = QuestionGenerator(
            num_options=config['mcq']['num_options'],
            option_labels=config['mcq']['option_labels'],
            distractor_strategy=config['mcq']['distractor_strategy']
        )
        
        # Initialize LLM question generator
        self.llm_enabled = config.get('llm', {}).get('enabled', False)
        self.llm_generator = LLMQuestionGenerator(
            enabled=self.llm_enabled,
            template_questions=self.task_config
        )
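        # Note: the LLM generator is initialized here but not invoked in this
        # module; the question templates from the task config are used directly.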
        
        # Duration settings from config
        self.min_clip_duration = config['audio']['min_clip_duration']
        self.max_clip_duration = config['audio']['max_clip_duration']
        # Duration of individual source clips (ESC-50 default is 5s)
        self.source_clip_duration = config['audio'].get('source_clip_duration', 5.0)
        self.min_silence_ms = config['audio'].get('min_silence_duration', 100)
        self.max_extra_silence_per_gap_ms = config['audio'].get('max_extra_silence_per_gap', 500)
        self.crossfade_ms = config['audio'].get('crossfade_duration', 0)
        self.task_duration_hours = self.task_config['task_duration_size']
        
        # Order task specific settings (allow_source_repetition is read for config
        # completeness; the pipeline currently caps n_clips at the category count,
        # so sources are not repeated in practice)
        self.allow_source_repetition = self.task_config.get('allow_source_repetition', False)
        self.min_clips_for_second = self.task_config.get('min_clips_for_second_questions', 4)
        
        # Set up output paths
        self.output_base = Path(config['output']['base_path']) / 'order'
        self.output_base.mkdir(parents=True, exist_ok=True)
        self.audio_output = self.output_base / 'audios'
        self.audio_output.mkdir(parents=True, exist_ok=True)
    
    def _get_valid_question_types(self, n_clips: int) -> List[str]:
        """
        Get question types valid for the given number of clips.
        
        "second" and "second_last" require at least min_clips_for_second clips.
        
        Args:
            n_clips: Number of clips in the sample
            
        Returns:
            List of valid question types
        """
        all_types = self.task_config['question_types']
        
        # Filter based on n_clips
        valid_types = []
        for qtype in all_types:
            if qtype in ['second', 'second_last']:
                if n_clips >= self.min_clips_for_second:
                    valid_types.append(qtype)
            elif qtype in ['after', 'before']:
                if n_clips >= 2:
                    valid_types.append(qtype)
            else:  # first, last
                valid_types.append(qtype)
        
        return valid_types if valid_types else ['first', 'last']
    
    def generate_sample(self, sample_id: int, target_question_type: Optional[str] = None,
                        target_duration_seconds: Optional[float] = None) -> Dict:
        """
        Generate a single order task sample.
        
        Pipeline: determine the target duration -> compute how many source clips fit ->
        pick a question type and the answer position it implies -> select categories with
        the answer at that position -> load the clips -> join them with random silences.
        
        Args:
            sample_id: Sample ID number
            target_question_type: Target question type for balanced distribution
            target_duration_seconds: Pre-generated target duration (from generate_sample_durations_for_task)
            
        Returns:
            Dictionary with sample metadata
        """
        # Use pre-generated duration or generate one (backward compatibility)
        if target_duration_seconds is not None:
            clip_duration_seconds = target_duration_seconds
        else:
            clip_duration_seconds = generate_single_clip_duration(
                self.min_clip_duration,
                self.max_clip_duration
            )

        # Calculate how many source clips (plus minimum silences) fit in the target duration
        max_clips, remainder_seconds = get_max_clip_num_to_be_joined(
            clip_duration_seconds,
            self.source_clip_duration,
            self.min_silence_ms
        )
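        # remainder_seconds (time left after fitting max_clips with minimum gaps) is
        # not consumed here: build_clip_sequence_with_silences below redistributes
        # any leftover time as random extra silence between clips.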
        
        max_clips_per_sample = self.task_config.get('max_clips_per_sample', 10)
        
        # Silence reduction strategy: draw n_clips from a narrow band just below the
        # capped maximum, so most of the target duration is filled by clips rather
        # than by padding silence.
        
        # Cap by the config limit and by the number of distinct sound categories
        max_clips_for_sample = min(max_clips, max_clips_per_sample, len(self.dataset.CATEGORIES))
        # At least 2 clips, and no more than 3 below the capped maximum (computing
        # the lower bound from the capped value avoids an impossible range when
        # max_clips greatly exceeds max_clips_per_sample)
        min_clips_for_sample = max(2, max_clips_for_sample - 3)
        
        # Validate range
        if max_clips_for_sample < 2:
            raise ValueError(
                f"Sample {sample_id}: Cannot generate order task - need at least 2 clips. "
                f"max_clips={max_clips}, max_clips_per_sample={max_clips_per_sample}, "
                f"duration={clip_duration_seconds:.1f}s. Increase min_clip_duration."
            )
        
        if min_clips_for_sample > max_clips_for_sample:
            raise ValueError(
                f"Sample {sample_id}: Invalid clip range - min_clips ({min_clips_for_sample}) > max_clips ({max_clips_for_sample}). "
                f"max_clips={max_clips}, max_clips_per_sample={max_clips_per_sample}, duration={clip_duration_seconds:.1f}s"
            )
        
        # Randomly select n_clips from the valid range (the clip count itself is not
        # balanced; question types are balanced upstream in generate_dataset)
        n_clips = random.randint(min_clips_for_sample, max_clips_for_sample)
        
        # Get valid question types for this n_clips
        valid_question_types = self._get_valid_question_types(n_clips)
        
        if not valid_question_types:
            raise ValueError(
                f"Sample {sample_id}: No valid question types for n_clips={n_clips}. "
                f"This should not happen - check _get_valid_question_types implementation."
            )
        
        # Pre-select question type to determine answer position
        if target_question_type is not None:
            if target_question_type not in valid_question_types:
                raise ValueError(
                    f"Sample {sample_id}: target_question_type='{target_question_type}' not valid for n_clips={n_clips}. "
                    f"Valid types: {valid_question_types}. Balanced distribution should only assign valid types."
                )
            question_type = target_question_type
        else:
            question_type = random.choice(valid_question_types)
        
        # Determine answer position based on question type
        if question_type == 'first':
            answer_position = 0
        elif question_type == 'last':
            answer_position = n_clips - 1
        elif question_type == 'second':
            answer_position = 1  # 0-indexed, so position 1 is second
        elif question_type == 'second_last':
            answer_position = n_clips - 2  # Second to last
        elif question_type == 'after':
            # Answer is after a reference, so position 1 to n-1
            answer_position = random.randint(1, n_clips - 1) if n_clips >= 2 else 0
        else:  # before
            # Answer is before a reference, so position 0 to n-2
            answer_position = random.randint(0, n_clips - 2) if n_clips >= 2 else 0
        
        # Select answer category from least-used categories
        answer_category = self.dataset.get_least_used_categories(1)[0]
        
        # Sample remaining categories, ensuring balanced distribution
        if n_clips <= len(self.dataset.CATEGORIES):
            other_categories = self.dataset.get_least_used_categories(
                n_clips - 1,
                exclude=[answer_category]
            )
        else:
            # Defensive branch: n_clips is capped at len(CATEGORIES) above, so this
            # should not trigger unless that cap changes. Sample with repetition,
            # excluding the answer category so the correct answer stays unambiguous.
            other_categories = self.dataset.get_least_used_categories(
                min(n_clips - 1, len(self.dataset.CATEGORIES) - 1),
                exclude=[answer_category]
            )
            while len(other_categories) < n_clips - 1:
                other_categories.append(
                    random.choice([c for c in self.dataset.CATEGORIES if c != answer_category])
                )
        
        # Arrange categories with answer at correct position
        selected_categories = []
        other_idx = 0
        for i in range(n_clips):
            if i == answer_position:
                selected_categories.append(answer_category)
            else:
                selected_categories.append(other_categories[other_idx])
                other_idx += 1
        
        # Track usage of answer category
        self.dataset.category_usage_counts[answer_category] += 1
        
        # Sample one file from each category and load audio
        audio_segments = []
        filenames_list = []
        
        for category in selected_categories:
            filename, filepath = self.dataset.sample_file_from_category(category)
            audio = self.audio_processor.load_audio(filepath)
            audio_segments.append(audio)
            filenames_list.append(filename)
        
        # Build final audio with guaranteed silences between clips
        output_audio_path = self.audio_output / f"{sample_id}.wav"
        final_audio = build_clip_sequence_with_silences(
            audio_segments,
            clip_duration_seconds,
            min_silence_ms=self.min_silence_ms,
            max_extra_silence_per_gap_ms=self.max_extra_silence_per_gap_ms,
            crossfade_ms=self.crossfade_ms
        )
        
        # Save the audio
        final_audio.export(str(output_audio_path), format="wav")
        
        # Determine the correct answer and generate questions for the chosen type.
        # Sanity check: verify answer_category actually sits at answer_position
        # (guards against regressions in the arrangement logic above).
        if selected_categories[answer_position] != answer_category:
            self.logger.error(f"Sample {sample_id}: Answer mismatch! Expected {answer_category} at position {answer_position}, got {selected_categories[answer_position]}")
            # Force correct by using actual category at answer_position
            correct_category = selected_categories[answer_position]
        else:
            correct_category = answer_category
        
        if question_type == 'first':
            mcq_question = self.task_config['mcq_questions']['first']
            open_text_question = self.task_config['open_text_questions']['first']
            
        elif question_type == 'last':
            mcq_question = self.task_config['mcq_questions']['last']
            open_text_question = self.task_config['open_text_questions']['last']
            
        elif question_type == 'second':
            mcq_question = self.task_config['mcq_questions']['second']
            open_text_question = self.task_config['open_text_questions']['second']
            
        elif question_type == 'second_last':
            mcq_question = self.task_config['mcq_questions']['second_last']
            open_text_question = self.task_config['open_text_questions']['second_last']
            
        elif question_type == 'after':
            # Reference is the sound before answer_position
            if answer_position > 0:
                reference_category = selected_categories[answer_position - 1]
                mcq_question = self.task_config['mcq_questions']['after'].format(sound1=reference_category)
                open_text_question = self.task_config['open_text_questions']['after'].format(sound1=reference_category)
            else:
                # Defensive fallback: answer_position is always >= 1 for 'after',
                # so this should not trigger; degrade to a 'first' question
                mcq_question = self.task_config['mcq_questions']['first']
                open_text_question = self.task_config['open_text_questions']['first']
                question_type = 'first'
                
        else:  # before
            # Reference is the sound after answer_position
            if answer_position < n_clips - 1:
                reference_category = selected_categories[answer_position + 1]
                mcq_question = self.task_config['mcq_questions']['before'].format(sound2=reference_category)
                open_text_question = self.task_config['open_text_questions']['before'].format(sound2=reference_category)
            else:
                # Defensive fallback: answer_position is always <= n_clips - 2 for
                # 'before', so this should not trigger; degrade to a 'first' question
                correct_category = selected_categories[0]
                mcq_question = self.task_config['mcq_questions']['first']
                open_text_question = self.task_config['open_text_questions']['first']
                question_type = 'first'
        
        # Generate MCQ
        mcq_data = self.question_generator.generate_category_mcq(
            mcq_question,
            correct_category,
            selected_categories,
            self.dataset.CATEGORIES
        )
        
        # Generate open-text question
        open_text_data = self.question_generator.generate_category_open_text(
            open_text_question,
            correct_category
        )
        
        # Also generate a sequence question for open-text
        sequence_question = self.task_config['open_text_questions']['sequence']
        sequence_data = self.question_generator.generate_sequence_open_text(
            sequence_question,
            selected_categories
        )
        
        # Create metadata
        metadata = {
            'id': sample_id,
            'audio_path': str(output_audio_path.relative_to(self.output_base.parent)),
            'n_clips': n_clips,
            'question_type': question_type,
            'audio_sequence': selected_categories,
            'correct_answer_category': correct_category,
            'source_files': filenames_list,
            'mcq_question': mcq_data['question'],
            'mcq_options': mcq_data['options'],
            'mcq_correct_answer': mcq_data['correct_answer'],
            'open_text_question': open_text_data['question'],
            'open_text_answer': open_text_data['correct_answer'],
            'sequence_question': sequence_data['question'],
            'sequence_answer': sequence_data['correct_answer']
        }
        
        self.logger.info(f"Generated order sample {sample_id}: {question_type}, {n_clips} clips")
        
        return metadata
    
    def generate_dataset(self) -> tuple:
        """
        Generate the complete order task dataset.
        
        Uses generate_sample_durations_for_task() to pre-generate exact sample durations
        that sum to exactly the target task duration. This guarantees:
        - Exact coverage of target duration
        - No estimation errors from average-based calculation
        
        Returns:
            Tuple of (mcq_csv_path, open_text_csv_path, sequence_csv_path)
        """
        # Generate sample durations upfront (guarantees exact total duration)
        sample_durations = generate_sample_durations_for_task(
            self.task_duration_hours,
            self.min_clip_duration,
            self.max_clip_duration
        )
        num_samples = len(sample_durations)
        
        self.logger.info(f"Generating {num_samples} order task samples (target: {self.task_duration_hours}h, exact fill)...")
        
        # Calculate effective max clips each sample can use (accounting for silence reduction)
        # This matches the logic in generate_sample()
        max_clips_per_sample = self.task_config.get('max_clips_per_sample', 10)
        sample_effective_max_clips = []
        
        for duration in sample_durations:
            max_clips, _ = get_max_clip_num_to_be_joined(
                duration,
                self.source_clip_duration,
                self.min_silence_ms
            )
            # Apply the same constraints as generate_sample()
            effective_max = min(max_clips, max_clips_per_sample, len(self.dataset.CATEGORIES))
            sample_effective_max_clips.append(effective_max)
        
        # Create capacity-aware balanced question type distribution
        # Categorize question types by clip requirements
        question_types = self.task_config['question_types']
        
        # Separate into tiers based on clip requirements
        basic_types = ['first', 'last', 'after', 'before']  # Need >= 2 clips
        advanced_types = ['second', 'second_last']  # Need >= min_clips_for_second
        
        # Count how many samples can support each tier (use effective max, not raw max)
        samples_for_basic = sum(1 for emc in sample_effective_max_clips if emc >= 2)
        samples_for_advanced = sum(1 for emc in sample_effective_max_clips if emc >= self.min_clips_for_second)
        self.logger.info(f"Capacity: {samples_for_basic}/{num_samples} samples fit basic types, {samples_for_advanced}/{num_samples} fit advanced")
        
        # Create list of (sample_idx, duration, effective_max_clips)
        sample_info = [(i, sample_durations[i], sample_effective_max_clips[i]) for i in range(num_samples)]
        
        # Sort by capacity (descending) - assign advanced types to high-capacity samples
        sample_info.sort(key=lambda x: x[2], reverse=True)
        
        # Calculate distribution: prefer advanced types for longer clips. Iterate
        # only over configured question types, advanced-first, so the pool size
        # always equals num_samples even if the config omits some types.
        ordered_types = [t for t in advanced_types + basic_types if t in question_types]
        ordered_types += [t for t in question_types if t not in ordered_types]
        samples_per_type = num_samples // len(ordered_types)
        remainder = num_samples % len(ordered_types)
        
        # Build assignment pool - advanced types first (for high-capacity samples)
        assignment_pool = []
        for qtype in ordered_types:
            count = samples_per_type + (1 if remainder > 0 else 0)
            assignment_pool.extend([qtype] * count)
            remainder = max(0, remainder - 1)
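        # Illustrative example: with num_samples=10 and all six types configured,
        # samples_per_type=1 and remainder=4, giving the pool [second, second,
        # second_last, second_last, first, first, last, last, after, before].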
        
        # Assign question types based on capacity
        balanced_assignments = [None] * num_samples
        
        for idx, (sample_idx, duration, capacity) in enumerate(sample_info):
            target_qtype = assignment_pool[idx]
            
            # Validate and adjust if needed
            valid_types = self._get_valid_question_types(capacity)
            
            if target_qtype not in valid_types:
                # Assign a valid alternative - prefer similar types
                if target_qtype in advanced_types and any(t in valid_types for t in basic_types):
                    # Downgrade to basic type
                    target_qtype = random.choice([t for t in basic_types if t in valid_types])
                else:
                    # Fallback to any valid type
                    target_qtype = random.choice(valid_types)
            
            balanced_assignments[sample_idx] = target_qtype
        
        # Log the actual distribution after capacity-aware assignment
        type_dist = Counter(balanced_assignments)
        self.logger.info(f"Balanced question type distribution (after capacity-aware assignment): {dict(sorted(type_dist.items()))}")
        
        all_metadata = []
        
        for i, target_duration in enumerate(sample_durations):
            metadata = self.generate_sample(
                i,
                target_question_type=balanced_assignments[i],
                target_duration_seconds=target_duration
            )
            all_metadata.append(metadata)
        
        # Save MCQ CSV
        mcq_csv_path = self.output_base / 'order_mcq.csv'
        self._save_mcq_csv(all_metadata, mcq_csv_path)
        
        # Save open-text CSV
        open_text_csv_path = self.output_base / 'order_open_text.csv'
        self._save_open_text_csv(all_metadata, open_text_csv_path)
        
        # Save sequence CSV
        sequence_csv_path = self.output_base / 'order_sequence.csv'
        self._save_sequence_csv(all_metadata, sequence_csv_path)
        
        # Save metadata CSV
        metadata_csv_path = self.output_base / 'order_metadata.csv'
        self._save_metadata_csv(all_metadata, metadata_csv_path)
        
        self.logger.info("Order task dataset generation complete!")
        self.logger.info(f"  - MCQ CSV: {mcq_csv_path}")
        self.logger.info(f"  - Open-text CSV: {open_text_csv_path}")
        self.logger.info(f"  - Sequence CSV: {sequence_csv_path}")
        self.logger.info(f"  - Metadata CSV: {metadata_csv_path}")
        self.logger.info(f"  - Audio files: {self.audio_output}")
        
        return mcq_csv_path, open_text_csv_path, sequence_csv_path
    
    def _save_mcq_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save MCQ format CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
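            # NOTE: fixed optionA-optionD columns assume the default of 4 MCQ options;
            # keep in sync with config['mcq']['num_options'] and ['option_labels'].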
            # Header
            writer.writerow([
                'question', 'id', 'audio_path',
                'optionA', 'optionB', 'optionC', 'optionD',
                'correct', 'question_type', 'audio_sequence'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['mcq_question'],
                    meta['id'],
                    meta['audio_path'],
                    meta['mcq_options']['A'],
                    meta['mcq_options']['B'],
                    meta['mcq_options']['C'],
                    meta['mcq_options']['D'],
                    meta['mcq_correct_answer'],
                    meta['question_type'],
                    str(meta['audio_sequence'])
                ])
    
    def _save_open_text_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save open-text format CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'question', 'id', 'audio_path', 'answer',
                'question_type', 'audio_sequence'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['open_text_question'],
                    meta['id'],
                    meta['audio_path'],
                    meta['open_text_answer'],
                    meta['question_type'],
                    str(meta['audio_sequence'])
                ])
    
    def _save_sequence_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save sequence question CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'question', 'id', 'audio_path', 'answer', 'audio_sequence'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['sequence_question'],
                    meta['id'],
                    meta['audio_path'],
                    meta['sequence_answer'],
                    str(meta['audio_sequence'])
                ])
    
    def _save_metadata_csv(self, metadata_list: List[Dict], output_path: Path):
        """Save detailed metadata CSV."""
        with open(output_path, 'w', newline='') as f:
            writer = csv.writer(f)
            # Header
            writer.writerow([
                'id', 'audio_path', 'n_clips', 'question_type',
                'audio_sequence', 'correct_answer', 'source_files'
            ])
            
            # Data rows
            for meta in metadata_list:
                writer.writerow([
                    meta['id'],
                    meta['audio_path'],
                    meta['n_clips'],
                    meta['question_type'],
                    str(meta['audio_sequence']),
                    meta['correct_answer_category'],
                    str(meta['source_files'])
                ])


def main(config_path: Optional[str] = None):
    """Main entry point for order task generation."""
    import yaml
    
    # Load configuration
    if config_path is None:
        config_path = Path(__file__).parent.parent / 'config.yaml'
    
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    
    # Set random seed
    set_random_seed(config['random_seed'])
    
    # Setup logger
    logger = setup_logger(
        'order_task',
        log_file=str(Path(config['output']['base_path']) / config['logging']['log_file']),
        level=config['logging']['level'],
        console_output=config['logging']['console_output']
    )
    
    # Generate dataset
    generator = OrderTaskGenerator(config, logger)
    generator.generate_dataset()


if __name__ == '__main__':
    main()
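
# Example usage (illustrative):
#   python order_task.py            # loads config.yaml from the directory above this file
#   # or, from another script: from order_task import main; main('path/to/config.yaml')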