File size: 3,020 Bytes
0bd1b0f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from typing import Any

from graphgen.bases import QAPair
from graphgen.utils import run_concurrent


def transform_to_qa_format(
    items: list[dict], format_hint: str = "auto"
) -> list[dict[str, str]]:
    """Normalize QA records into a uniform ``{"question", "answer"}`` shape.

    Supports three input layouts:

    - ``ChatML``: ``{"messages": [{"role": ..., "content": ...}, ...]}`` —
      first ``user`` message becomes the question, first ``assistant``
      message the answer.
    - ``Alpaca``: ``{"instruction", "input", "output"}`` — instruction and
      (non-empty) input are joined with a blank line to form the question.
    - ``Sharegpt``: ``{"conversations": [{"from": ..., "value": ...}, ...]}`` —
      first ``human`` turn is the question, first ``gpt``/``assistant`` turn
      the answer.

    If the question text contains an ``"\\nOptions:\\n"`` section, it is split
    off and parsed into an ``{"options": {letter: text}}`` mapping.

    Args:
        items: Raw records in one of the supported layouts.
        format_hint: One of ``"ChatML"``, ``"Alpaca"``, ``"Sharegpt"``, or
            ``"auto"`` (default) to detect per item from its keys.

    Returns:
        One dict per input item with ``"question"`` and ``"answer"`` keys,
        plus ``"options"`` when an options section was present.

    Raises:
        ValueError: If the format cannot be auto-detected, or if an explicit
            ``format_hint`` names an unsupported format.
    """
    extractors = {
        "ChatML": lambda x: (
            next(
                (
                    m["content"]
                    for m in x.get("messages", [])
                    if m.get("role") == "user"
                ),
                "",
            ),
            next(
                (
                    m["content"]
                    for m in x.get("messages", [])
                    if m.get("role") == "assistant"
                ),
                "",
            ),
        ),
        "Alpaca": lambda x: (
            f"{x.get('instruction', '')}\n\n{x['input']}".strip()
            if x.get("input")
            else x.get("instruction", ""),
            x.get("output", ""),
        ),
        "Sharegpt": lambda x: (
            next(
                (
                    c["value"]
                    for c in x.get("conversations", [])
                    if c.get("from") == "human"
                ),
                "",
            ),
            next(
                (
                    c["value"]
                    for c in x.get("conversations", [])
                    if c.get("from") in ("gpt", "assistant")
                ),
                "",
            ),
        ),
    }

    # Key whose presence identifies each layout during auto-detection.
    auto_detect = {
        "messages": "ChatML",
        "conversations": "Sharegpt",
        "instruction": "Alpaca",
    }

    transformed = []
    for item in items:
        fmt = format_hint
        if fmt == "auto":
            fmt = next(
                (fmt_name for key, fmt_name in auto_detect.items() if key in item), None
            )
            if not fmt:
                raise ValueError(
                    "Could not auto-detect format. Please specify format_hint."
                )
        # Validate explicitly so a bad hint raises a descriptive ValueError
        # instead of a bare KeyError on the extractor lookup below.
        if fmt not in extractors:
            raise ValueError(
                f"Unsupported format {fmt!r}. Supported formats: {sorted(extractors)}"
            )

        question, answer = extractors[fmt](item)
        options = None
        if "\nOptions:\n" in question:
            # Split multiple-choice options ("A. text" lines) out of the question.
            q_part, opt_part = question.split("\nOptions:\n", 1)
            question = q_part
            options = {
                k.strip(): v.strip()
                for line in opt_part.strip().split("\n")
                if "." in line
                for k, v in [line.split(".", 1)]
            }

        result = {"question": question.strip(), "answer": answer.strip()}
        if options:
            result["options"] = options
        transformed.append(result)

    return transformed


def evaluate_qa(
    qa_evaluators: dict[str, Any], items: list[dict[str, Any]]
) -> dict[str, Any]:
    """Run every configured evaluator over the given QA records.

    The raw records are first normalized via ``transform_to_qa_format`` and
    wrapped as ``QAPair`` objects, then each evaluator in ``qa_evaluators``
    is applied concurrently to the full pair list.

    Args:
        qa_evaluators: Mapping of evaluator name -> evaluator object exposing
            an ``evaluate`` callable.
        items: Raw QA records in any format accepted by
            ``transform_to_qa_format``.

    Returns:
        Mapping of evaluator name -> that evaluator's concurrent results.
    """
    qa_pairs = [QAPair.from_dict(record) for record in transform_to_qa_format(items)]
    return {
        name: run_concurrent(
            evaluator.evaluate,
            qa_pairs,
            desc=f"Evaluating QA with {name}",
        )
        for name, evaluator in qa_evaluators.items()
    }