"""
MINDI 1.5 Vision-Coder — Evaluation System

Evaluates model quality on code generation benchmarks,
UI quality metrics, and end-to-end task completion.
"""

from __future__ import annotations

import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional


@dataclass
class EvalMetrics:
    """Aggregated evaluation metrics."""
    pass_at_1: float = 0.0          # Code correctness (passes tests)
    pass_at_5: float = 0.0          # Code correctness with 5 samples
    ui_quality_score: float = 0.0   # Average vision critic score
    syntax_error_rate: float = 0.0  # Fraction with syntax errors
    type_error_rate: float = 0.0    # Fraction with TypeScript errors
    avg_iterations: float = 0.0     # Average fix iterations needed
    total_examples: int = 0
    details: list[dict[str, Any]] = field(default_factory=list)
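

# Reference sketch: the pass@k fields above are conventionally computed
# with the unbiased estimator from Chen et al. (2021, HumanEval). This
# helper is illustrative only; it assumes `n` generations per task, `c`
# of which pass that task's tests.
def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimate: 1 - C(n - c, k) / C(n, k)."""
    import math

    if n - c < k:
        return 1.0  # every size-k subset must contain a passing sample
    # Numerically stable product form of 1 - C(n - c, k) / C(n, k).
    return 1.0 - math.prod(1.0 - k / i for i in range(n - c + 1, n + 1))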


class Evaluator:
    """Evaluates MINDI 1.5 model across multiple quality dimensions."""

    def __init__(
        self,
        eval_data_dir: Optional[Path] = None,
        results_dir: Optional[Path] = None,
    ) -> None:
        self.eval_data_dir = eval_data_dir or Path("./data/processed/eval")
        self.results_dir = results_dir or Path("./logs/evaluation")
        self.results_dir.mkdir(parents=True, exist_ok=True)

    async def run_evaluation(
        self,
        pipeline: Any,
        num_samples: int = 100,
    ) -> EvalMetrics:
        """Run full evaluation suite against the inference pipeline."""
        # Placeholder: the real eval logic is not implemented yet; see
        # the aggregation sketch below for how results could be rolled up.
        return EvalMetrics(total_examples=num_samples)
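
    # Illustrative sketch of how per-example results might be rolled up
    # into EvalMetrics. The result-dict keys ("passed", "syntax_error",
    # "type_error", "iterations") are assumptions for illustration, not
    # a confirmed schema.
    @staticmethod
    def _aggregate(results: list[dict[str, Any]]) -> EvalMetrics:
        n = len(results)
        if n == 0:
            return EvalMetrics()
        return EvalMetrics(
            pass_at_1=sum(bool(r.get("passed")) for r in results) / n,
            syntax_error_rate=sum(bool(r.get("syntax_error")) for r in results) / n,
            type_error_rate=sum(bool(r.get("type_error")) for r in results) / n,
            avg_iterations=sum(r.get("iterations", 0) for r in results) / n,
            total_examples=n,
            details=results,
        )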

    def save_results(self, metrics: EvalMetrics, run_name: str = "eval") -> Path:
        """Save evaluation results to disk."""
        # Note: per-example `details` are not included in the saved summary.
        output_path = self.results_dir / f"{run_name}_results.json"
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(
                {
                    "pass_at_1": metrics.pass_at_1,
                    "pass_at_5": metrics.pass_at_5,
                    "ui_quality_score": metrics.ui_quality_score,
                    "syntax_error_rate": metrics.syntax_error_rate,
                    "type_error_rate": metrics.type_error_rate,
                    "avg_iterations": metrics.avg_iterations,
                    "total_examples": metrics.total_examples,
                },
                f,
                indent=2,
            )
        return output_path
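

# Example usage, assuming the stubbed run_evaluation above (which does
# not touch the pipeline yet, so `pipeline=None` is safe here):
if __name__ == "__main__":
    import asyncio

    evaluator = Evaluator()
    metrics = asyncio.run(evaluator.run_evaluation(pipeline=None, num_samples=10))
    print(f"Results written to {evaluator.save_results(metrics, run_name='smoke')}")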