File size: 6,477 Bytes
1e34522
 
 
 
6546def
1e34522
 
 
 
 
57eb2f4
 
d2e36fd
57eb2f4
 
 
1e34522
 
 
406aa65
bd8143b
df6b6fb
406aa65
df6b6fb
0b5fcb3
 
 
 
 
 
 
 
 
 
 
3e2e58f
0314b27
 
 
 
 
 
 
 
 
df6b6fb
0314b27
378f7fe
0314b27
 
 
57eb2f4
0314b27
378f7fe
 
0314b27
 
 
57eb2f4
0314b27
378f7fe
0314b27
b5c7e85
0314b27
 
 
57eb2f4
df6b6fb
57eb2f4
378f7fe
57eb2f4
 
 
 
378f7fe
 
57eb2f4
 
 
 
378f7fe
57eb2f4
378f7fe
57eb2f4
 
 
d1aa673
df6b6fb
05f34b8
df6b6fb
 
d1aa673
 
 
df6b6fb
 
d2e36fd
1e34522
d2e36fd
 
 
0b5fcb3
1e34522
 
 
df6b6fb
1e34522
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import json
import os

import pandas as pd
import numpy as np
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


# Metadata stubs for closed-source API models that have no Hub entry.
# NOTE(review): "params": 72000 looks like a placeholder (units unclear —
# presumably millions of parameters); confirm against how open models report it.
CLOSED_MODELS = {
    "openai/GPT-4o": {"params": 72000, "license": "proprietary", "likes": 0, "model_type": "🔒 : closed"},
    "Claude-3.5-Sonnet": {"params": 72000, "license": "proprietary", "likes": 0, "model_type": "🔒 : closed"},
}


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results.

    Args:
        results_path: Directory containing the raw evaluation result files.
        requests_path: Directory containing the evaluation request files.
        cols: Display columns to keep (in order) in the returned dataframe.
        benchmark_cols: Benchmark score columns whose NaNs are replaced by 0.

    Returns:
        A dataframe restricted to the columns of ``cols`` that exist, sorted
        descending by the "Average ⬆️" column and rounded to 2 decimals.
    """
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)

    def extract_first(value):
        """Reduce an array-like score to its first scalar; empty/unknown -> 0."""
        if isinstance(value, (list, np.ndarray)):
            return value[0] if len(value) > 0 else 0
        if isinstance(value, (int, float)):
            return value
        return 0

    # Every benchmark metric column, grouped by task. This single list drives
    # both the scalar conversion below and the overall-average computation.
    score_cols = [
        # Task 1
        "(T1) Precision (%)",
        "(T1) Title Search Rate (%)",
        "(T1) Overlap (%)",
        "(T1) Precision (First Author) (%)",
        "(T1) Overlap (First Author) (%)",
        # Task 2
        "(T2) Similarity (%)",
        "(T2) Entail (TRUE %)",
        "(T2) Entail (GPT-4o %)",
        "(T2) ROUGE-1 (%)",
        "(T2) ROUGE-2 (%)",
        "(T2) ROUGE-L (%)",
        # Task 3
        "(T3) Precision (%)",
        "(T3) Title Search Rate (%)",
        "(T3) Overlap (%)",
        "(T3) KPR (%)",
        "(T3) ROUGE-1 (%)",
        "(T3) ROUGE-2 (%)",
        "(T3) ROUGE-L (%)",
    ]

    # Convert array-valued scores to scalars; empty arrays become 0.
    for col in score_cols:
        df[col] = df[col].apply(extract_first)

    # Overall average across every task metric — the ranking key.
    df["Average ⬆️"] = df[score_cols].mean(axis=1)

    # Rank models best-first.
    df = df.sort_values(by=["Average ⬆️"], ascending=False)

    # Keep only the requested display columns that actually exist.
    cols = [c for c in cols if c in df.columns]
    df = df[cols].round(2)

    # Ignore benchmark columns missing from the dataframe; zero-fill NaNs.
    benchmark_cols = [c for c in benchmark_cols if c in df.columns]
    df[benchmark_cols] = df[benchmark_cols].fillna(0)

    return df



def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests.

    Scans ``save_path`` for request JSON files (top-level files and files one
    directory deep), then partitions them by status.

    Args:
        save_path: Directory holding the evaluation request JSON files.
        cols: Columns of the returned dataframes.

    Returns:
        A tuple of three dataframes: (finished, running, pending).
    """
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    def _load_eval(file_path: str) -> dict:
        """Load one request JSON and add display-ready model/revision fields."""
        with open(file_path) as fp:
            data = json.load(fp)
        data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
        data[EvalQueueColumn.revision.name] = data.get("revision", "main")
        return data

    for entry in entries:
        if entry.endswith(".json"):
            all_evals.append(_load_eval(os.path.join(save_path, entry)))
        elif ".md" not in entry:
            # this is a folder
            sub_dir = os.path.join(save_path, entry)
            # BUG FIX: os.path.isfile must be given the full path — checking the
            # bare filename resolved against the CWD and silently skipped every
            # file in sub-folders.
            sub_entries = [
                e
                for e in os.listdir(sub_dir)
                if os.path.isfile(os.path.join(sub_dir, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                all_evals.append(_load_eval(os.path.join(sub_dir, sub_entry)))

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]