"""

Batch evaluator for running multiple evaluations with incremental progress.



Supports:

- Batch evaluation of multiple solution-problem pairs

- Incremental evaluation (resume from where you left off)

- Parallel execution (configurable concurrency)

- Both Docker and SkyPilot backends

- Bucket storage for results (S3/GCS)

- Progress bar with tqdm

- Export failed/pending/aggregated results

"""

import logging
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Callable, List, Optional

try:
    from tqdm import tqdm
    HAS_TQDM = True
except ImportError:
    HAS_TQDM = False
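
# tqdm is optional: when it is not installed, progress bars are disabled and
# per-pair progress is reported through the module logger instead.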

from ..runner.base import EvaluationResult, EvaluationStatus
from ..runner.docker import DockerRunner
from .pair import Pair, expand_pairs, read_pairs_file, read_problems_file, read_models_file, read_variants_file
from .state import EvaluationState, PairResult

logger = logging.getLogger(__name__)


class BatchEvaluator:
    """

    Batch evaluator with incremental progress tracking.



    Example usage:

        # Evaluate all problems for a model

        evaluator = BatchEvaluator(results_dir="results/gpt5")

        evaluator.evaluate_model("gpt-5", problems=["flash_attn", "cross_entropy"])



        # Resume an interrupted evaluation

        evaluator = BatchEvaluator(results_dir="results/gpt5")

        evaluator.resume()



        # Evaluate from a pairs file

        evaluator = BatchEvaluator(results_dir="results/batch1")

        evaluator.# Use batch.scan_solutions_dir() or evaluate_pairs()

    """

    STATE_FILE = ".state.json"

    def __init__(
        self,
        results_dir: Path,
        *,
        base_dir: Optional[Path] = None,
        backend: str = "docker",
        max_concurrent: int = 1,
        timeout: Optional[int] = None,
        bucket_url: Optional[str] = None,
    ):
        """

        Initialize batch evaluator.



        Args:

            results_dir: Directory for results and state

            base_dir: Frontier-CS base directory (auto-detected if None)

            backend: Evaluation backend ("docker" or "skypilot")

            max_concurrent: Maximum concurrent evaluations

            timeout: Default timeout for evaluations (seconds)

            bucket_url: Optional bucket URL for result storage (s3://... or gs://...)

                       Only used with skypilot backend. Results are written directly

                       to the bucket and synced incrementally.

        """
        self.results_dir = Path(results_dir)
        self.base_dir = base_dir or self._find_base_dir()
        self.backend = backend
        self.max_concurrent = max_concurrent
        self.timeout = timeout
        self.bucket_url = bucket_url
        self._bucket_storage = None

        # Initialize bucket storage if provided
        if bucket_url:
            from ..storage.bucket import BucketStorage
            self._bucket_storage = BucketStorage(
                bucket_url,
                local_cache=self.results_dir / ".bucket_cache",
            )

        self.state_path = self.results_dir / self.STATE_FILE
        self.state = EvaluationState.load(self.state_path)

        # Initialize runner
        if backend == "docker":
            self._runner = DockerRunner(base_dir=self.base_dir)
        else:
            from ..runner.skypilot import SkyPilotRunner
            self._runner = SkyPilotRunner(
                base_dir=self.base_dir,
                bucket_url=bucket_url,
            )

    def _find_base_dir(self) -> Path:
        """Find the Frontier-CS base directory."""
        candidates = [
            Path(__file__).parents[4],  # src/frontier_cs/batch/evaluator.py -> repo root
            Path.cwd(),
            Path.cwd().parent,
        ]
        for candidate in candidates:
            if (candidate / "research").is_dir() and (candidate / "pyproject.toml").exists():
                return candidate
        raise RuntimeError("Could not find Frontier-CS base directory")

    def _save_state(self) -> None:
        """Save current state to disk."""
        self.state.save(self.state_path)

    def sync_from_bucket(self) -> int:
        """

        Sync results from bucket to local state.



        Downloads new results from the bucket and merges them into the local state.

        Uses incremental sync (--size-only) to only download changed files.



        Returns:

            Number of results synced from bucket

        """
        if not self._bucket_storage:
            logger.warning("No bucket storage configured")
            return 0

        # Sync files from bucket to local cache
        count = self._bucket_storage.sync_from_bucket()

        if count == 0:
            return 0

        # Read all results from bucket and merge into state
        bucket_results = self._bucket_storage.read_all_results()
        merged = 0

        for pair_id, result_data in bucket_results.items():
            # Check if we need to update local state
            existing = self.state.results.get(pair_id)

            # Update if: not in state, or bucket has newer/better result
            should_update = (
                existing is None
                or not existing.is_complete
                or (result_data.status == "success" and existing.status != "success")
            )

            if should_update:
                self.state.results[pair_id] = PairResult(
                    pair_id=pair_id,
                    score=result_data.score,
                    status=result_data.status,
                    message=result_data.message,
                    duration_seconds=result_data.duration_seconds,
                    timestamp=result_data.timestamp,
                )
                merged += 1

        if merged > 0:
            self._save_state()
            logger.info(f"Merged {merged} results from bucket into local state")

        return merged
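
    # Typical bucket-mode flow (illustrative sketch; the bucket URL and results
    # directory are hypothetical):
    #
    #     evaluator = BatchEvaluator(
    #         results_dir=Path("results/run1"),
    #         backend="skypilot",
    #         bucket_url="s3://my-bucket/results",
    #     )
    #     evaluator.sync_from_bucket()  # merge results left by remote workers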

    def evaluate_pairs(
        self,
        pairs: List[Pair],
        *,
        resume: bool = True,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
        show_progress: bool = True,
    ) -> EvaluationState:
        """

        Evaluate a list of pairs.



        Args:

            pairs: List of pairs to evaluate

            resume: Skip already-completed pairs

            on_progress: Callback after each evaluation

            show_progress: Show tqdm progress bar



        Returns:

            Final evaluation state

        """
        # Sync from bucket first if using bucket storage (get results from previous runs)
        if self._bucket_storage:
            self.sync_from_bucket()

        # Initialize state
        if not self.state.started_at:
            self.state.started_at = time.strftime("%Y-%m-%dT%H:%M:%S")
            self.state.backend = self.backend
            self.state.total_pairs = len(pairs)
        self._save_state()

        # Get pending pairs
        if resume:
            pending = self.state.get_pending_pairs(pairs)
            if len(pending) < len(pairs):
                logger.info(f"Resuming: {len(pairs) - len(pending)} pairs already complete")
        else:
            pending = pairs

        if not pending:
            logger.info("All pairs already evaluated")
            self._export_all_results(pairs)
            return self.state

        logger.info(f"Evaluating {len(pending)} pairs (max_concurrent={self.max_concurrent})")

        # Evaluate pairs
        if self.max_concurrent == 1:
            self._evaluate_sequential(pending, on_progress, show_progress)
        else:
            self._evaluate_parallel(pending, on_progress, show_progress)

        # Export all results
        self._export_all_results(pairs)

        return self.state
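
    # Example on_progress callback (sketch): it receives each Pair together with
    # its EvaluationResult as soon as that evaluation finishes.
    #
    #     def log_progress(pair: Pair, result: EvaluationResult) -> None:
    #         print(f"{pair.id}: {result.status} score={result.score}")
    #
    #     evaluator.evaluate_pairs(pairs, on_progress=log_progress)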

    def _export_all_results(self, all_pairs: Optional[List[Pair]] = None) -> None:
        """Export all result files."""
        # Sync from bucket to get latest results (for bucket mode)
        if self._bucket_storage:
            self.sync_from_bucket()

        # Basic results
        self.state.export_csv(self.results_dir / "results.csv")
        self.state.export_summary(self.results_dir / "summary.txt")

        # Failed/pending/skipped
        failed_count = self.state.export_failed(self.results_dir / "failed.txt")
        pending_count = self.state.export_pending(self.results_dir / "pending.txt", all_pairs)
        skipped_count = self.state.export_skipped(self.results_dir / "skipped.txt")

        # Aggregated results
        self.state.export_aggregated_csv(self.results_dir / "by_model.csv", by="model")
        self.state.export_aggregated_csv(self.results_dir / "by_problem.csv", by="problem")

        logger.info(f"Results exported to {self.results_dir}")
        if failed_count > 0:
            logger.info(f"  - {failed_count} failed pairs in failed.txt")
        if pending_count > 0:
            logger.info(f"  - {pending_count} pending pairs in pending.txt")

    def _evaluate_sequential(
        self,
        pairs: List[Pair],
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]],
        show_progress: bool = True,
    ) -> None:
        """Evaluate pairs sequentially with optional progress bar."""
        pbar = None
        if show_progress and HAS_TQDM:
            pbar = tqdm(total=len(pairs), desc="Evaluating", unit="pair", dynamic_ncols=True)

        try:
            for i, pair in enumerate(pairs, 1):
                if pbar:
                    pbar.set_postfix_str(pair.id[:40])
                else:
                    logger.info(f"[{i}/{len(pairs)}] Evaluating {pair.id}")

                self.state.mark_running(pair)
                self._save_state()

                try:
                    result = self._evaluate_pair(pair)
                    self._record_result(pair, result)

                    if on_progress:
                        on_progress(pair, result)

                    # Log result
                    status_str = "OK" if result.success else "FAIL"
                    score_str = str(result.score) if result.success else (result.message or "error")
                    if pbar:
                        pbar.write(f"  [{status_str}] {pair.id}: {score_str}")

                except Exception as e:
                    logger.error(f"Error evaluating {pair.id}: {e}")
                    self.state.record_result(
                        pair,
                        score=None,
                        status="error",
                        message=str(e),
                    )
                    if pbar:
                        pbar.write(f"  [ERROR] {pair.id}: {e}")
                finally:
                    self._save_state()
                    if pbar:
                        pbar.update(1)
        finally:
            if pbar:
                pbar.close()

    def _evaluate_parallel(
        self,
        pairs: List[Pair],
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]],
        show_progress: bool = True,
    ) -> None:
        """Evaluate pairs in parallel with optional progress bar."""
        pbar = None
        if show_progress and HAS_TQDM:
            pbar = tqdm(total=len(pairs), desc="Evaluating", unit="pair", dynamic_ncols=True)

        try:
            with ThreadPoolExecutor(max_workers=self.max_concurrent) as executor:
                futures = {}
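                # Mark every pair as running before submitting, so a run that is
                # interrupted mid-flight still records which pairs were in progress.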
                for pair in pairs:
                    self.state.mark_running(pair)
                    future = executor.submit(self._evaluate_pair, pair)
                    futures[future] = pair
                self._save_state()

                for future in as_completed(futures):
                    pair = futures[future]
                    try:
                        result = future.result()
                        self._record_result(pair, result)

                        if on_progress:
                            on_progress(pair, result)

                        # Log result
                        status_str = "OK" if result.success else "FAIL"
                        score_str = str(result.score) if result.success else (result.message or "error")
                        if pbar:
                            pbar.write(f"  [{status_str}] {pair.id}: {score_str}")

                    except Exception as e:
                        logger.error(f"Error evaluating {pair.id}: {e}")
                        self.state.record_result(
                            pair,
                            score=None,
                            status="error",
                            message=str(e),
                        )
                        if pbar:
                            pbar.write(f"  [ERROR] {pair.id}: {e}")
                    finally:
                        self._save_state()
                        if pbar:
                            pbar.update(1)
        finally:
            if pbar:
                pbar.close()

    def _evaluate_pair(self, pair: Pair) -> EvaluationResult:
        """Evaluate a single pair using the configured runner."""
        # Find the solution file: the primary layout is
        # solutions/<solution>/resources/solution.py
        solution_dir = self.base_dir / "solutions" / pair.solution / "resources"
        solution_file = solution_dir / "solution.py"

        if not solution_file.exists():
            # Fall back to the flat layout: solutions/<solution>/solution.py
            solution_file = self.base_dir / "solutions" / pair.solution / "solution.py"

        if not solution_file.exists():
            return EvaluationResult(
                problem_id=pair.problem,
                status=EvaluationStatus.ERROR,
                message=f"Solution file not found: {pair.solution}",
            )

        return self._runner.evaluate_file(
            pair.problem,
            solution_file,
            timeout=self.timeout,
            solution_id=pair.solution,
        )

    def _record_result(self, pair: Pair, result: EvaluationResult) -> None:
        """Record an evaluation result to state."""
        status_map = {
            EvaluationStatus.SUCCESS: "success",
            EvaluationStatus.ERROR: "error",
            EvaluationStatus.TIMEOUT: "timeout",
            EvaluationStatus.SKIPPED: "skipped",
        }
        self.state.record_result(
            pair,
            score=result.score,
            status=status_map.get(result.status, "error"),
            message=result.message,
            duration_seconds=result.duration_seconds,
        )

    def evaluate_model(
        self,
        model: str,
        problems: List[str],
        *,
        variants: Optional[List[int]] = None,
        resume: bool = True,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
    ) -> EvaluationState:
        """
        Evaluate all problems for a given model.

        Args:
            model: Model name (e.g., "gpt-5", "claude-sonnet-4-5")
            problems: List of problem IDs
            variants: List of variant indices (default: [0])
            resume: Skip already-completed pairs
            on_progress: Callback after each evaluation

        Returns:
            Final evaluation state
        """
        solutions_dir = self.base_dir / "solutions"
        pairs = expand_pairs(
            problems,
            [model],
            variants,
            solutions_dir=solutions_dir,
            validate_paths=True,
        )

        if not pairs:
            logger.warning(f"No valid pairs found for model {model}")
            return self.state

        return self.evaluate_pairs(pairs, resume=resume, on_progress=on_progress)

    def evaluate_problem(
        self,
        problem: str,
        models: List[str],
        *,
        variants: Optional[List[int]] = None,
        resume: bool = True,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
    ) -> EvaluationState:
        """
        Evaluate a problem across all given models.

        Args:
            problem: Problem ID (e.g., "flash_attn")
            models: List of model names
            variants: List of variant indices (default: [0])
            resume: Skip already-completed pairs
            on_progress: Callback after each evaluation

        Returns:
            Final evaluation state
        """
        solutions_dir = self.base_dir / "solutions"
        pairs = expand_pairs(
            [problem],
            models,
            variants,
            solutions_dir=solutions_dir,
            validate_paths=True,
        )

        if not pairs:
            logger.warning(f"No valid pairs found for problem {problem}")
            return self.state

        return self.evaluate_pairs(pairs, resume=resume, on_progress=on_progress)

    def evaluate_pairs_file(
        self,
        pairs_file: Path,
        *,
        resume: bool = True,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
    ) -> EvaluationState:
        """
        Evaluate pairs from a pairs file.

        Args:
            pairs_file: Path to pairs file (solution:problem per line)
            resume: Skip already-completed pairs
            on_progress: Callback after each evaluation

        Returns:
            Final evaluation state
        """
        pairs = read_pairs_file(pairs_file)
        return self.evaluate_pairs(pairs, resume=resume, on_progress=on_progress)

    def evaluate_from_files(
        self,
        problems_file: Path,
        models_file: Path,
        *,
        variants_file: Optional[Path] = None,
        resume: bool = True,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
    ) -> EvaluationState:
        """
        Evaluate pairs by expanding problems × models × variants.

        Args:
            problems_file: Path to problems file (one per line)
            models_file: Path to models file (one per line)
            variants_file: Path to variants file (optional, defaults to [0])
            resume: Skip already-completed pairs
            on_progress: Callback after each evaluation

        Returns:
            Final evaluation state
        """
        problems = read_problems_file(problems_file)
        models = read_models_file(models_file)
        variants = read_variants_file(variants_file) if variants_file else [0]

        solutions_dir = self.base_dir / "solutions"
        pairs = expand_pairs(
            problems,
            models,
            variants,
            solutions_dir=solutions_dir,
            validate_paths=True,
        )

        logger.info(f"Expanded {len(problems)} problems × {len(models)} models × {len(variants)} variants = {len(pairs)} pairs")

        return self.evaluate_pairs(pairs, resume=resume, on_progress=on_progress)
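
    # Example input files for evaluate_from_files (illustrative contents; each
    # file lists one entry per line, and the variants file holds integer indices):
    #
    #     problems.txt        models.txt          variants.txt
    #     -------------       -----------------   ------------
    #     flash_attn          gpt-5               0
    #     cross_entropy       claude-sonnet-4-5   1
    #
    # This example expands to 2 problems × 2 models × 2 variants = 8 pairs.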

    def resume(
        self,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
    ) -> EvaluationState:
        """
        Resume an interrupted evaluation.

        Requires that the state file exists from a previous run.

        Returns:
            Final evaluation state
        """
        if not self.state.results:
            raise ValueError("No previous evaluation state found")

        # Reconstruct pairs from state; pair IDs have the form "solution:problem",
        # so split on the first colon only.
        pairs = [
            Pair(solution=pair_id.split(":", 1)[0], problem=pair_id.split(":", 1)[1])
            for pair_id in self.state.results.keys()
        ]

        # Note: only pairs already recorded in the state can be reconstructed here;
        # pairs that were never started must be re-added via an evaluate_* call.

        return self.evaluate_pairs(pairs, resume=True, on_progress=on_progress)

    def get_status(self) -> dict:
        """Get current evaluation status."""
        return {
            "total_pairs": self.state.total_pairs,
            "completed": self.state.completed_count,
            "successful": self.state.success_count,
            "errors": self.state.error_count,
            "pending": self.state.total_pairs - self.state.completed_count,
            "started_at": self.state.started_at,
            "updated_at": self.state.updated_at,
        }
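
    # Status can be polled from a separate process: constructing a BatchEvaluator
    # over the same results_dir reloads .state.json from disk (sketch):
    #
    #     watcher = BatchEvaluator(results_dir=Path("results/run1"))
    #     print(watcher.get_status())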

    def retry_failed(
        self,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
        show_progress: bool = True,
    ) -> EvaluationState:
        """
        Retry all failed pairs from the current state.

        Returns:
            Final evaluation state
        """
        failed_pairs = self.state.get_failed_pairs()
        if not failed_pairs:
            logger.info("No failed pairs to retry")
            return self.state

        logger.info(f"Retrying {len(failed_pairs)} failed pairs")

        # Clear failed status so they can be retried
        for pair in failed_pairs:
            del self.state.results[pair.id]
        self._save_state()

        return self.evaluate_pairs(failed_pairs, resume=False, on_progress=on_progress, show_progress=show_progress)

    def evaluate_missing(
        self,
        problems: List[str],
        models: List[str],
        *,
        variants: Optional[List[int]] = None,
        on_progress: Optional[Callable[[Pair, EvaluationResult], None]] = None,
        show_progress: bool = True,
    ) -> EvaluationState:
        """
        Evaluate only missing pairs (those not yet in results).

        Useful for completing an evaluation run after adding new models/problems.

        Args:
            problems: List of all problem IDs
            models: List of all model names
            variants: List of variant indices (default: [0])
            on_progress: Callback after each evaluation
            show_progress: Show progress bar

        Returns:
            Final evaluation state
        """
        solutions_dir = self.base_dir / "solutions"
        all_pairs = expand_pairs(
            problems,
            models,
            variants,
            solutions_dir=solutions_dir,
            validate_paths=True,
        )

        # Find pairs not in current state
        missing = [p for p in all_pairs if p.id not in self.state.results]

        if not missing:
            logger.info("No missing pairs to evaluate")
            return self.state

        logger.info(f"Found {len(missing)} missing pairs out of {len(all_pairs)} total")

        # Update total_pairs to include all pairs
        self.state.total_pairs = len(all_pairs)
        self._save_state()

        return self.evaluate_pairs(missing, resume=False, on_progress=on_progress, show_progress=show_progress)
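

# Minimal end-to-end sketch (illustrative paths and IDs; the import assumes the
# package layout src/frontier_cs/batch/evaluator.py noted in _find_base_dir):
#
#     from pathlib import Path
#     from frontier_cs.batch.evaluator import BatchEvaluator
#
#     evaluator = BatchEvaluator(results_dir=Path("results/run1"), max_concurrent=4)
#     evaluator.evaluate_from_files(Path("problems.txt"), Path("models.txt"))
#     print(evaluator.get_status())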