import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval


def restart_space():
    """Restart this Hugging Face Space via the Hub API."""
    API.restart_space(repo_id=REPO_ID)

### Space initialisation (currently disabled: snapshots are not downloaded on startup)
# try:
#     print(EVAL_REQUESTS_PATH)
#     snapshot_download(
#         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
#     )
# except Exception:
#     restart_space()
# try:
#     print(EVAL_RESULTS_PATH)
#     snapshot_download(
#         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
#     )
# except Exception:
#     restart_space()


LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

# Evaluation queue tables (loaded here but not currently rendered in the UI below).
(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

def init_leaderboard(dataframe):
    # Fall back to an empty frame with the expected columns so the component still renders.
    if dataframe.empty:
        dataframe = pd.DataFrame(columns=[c.name for c in fields(AutoEvalColumn)])

    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )


demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("πŸš€ Submit", elem_id="llm-benchmark-tab-table", id=3):
            gr.Markdown("""
We welcome community submissions of new model evaluation results. These submissions will be listed as 'External', and authors must upload their generated outputs for peer review.

## Evaluation
Follow the evaluation [Setup](https://huggingface.co/docs/hub/spaces-overview) and [Usage](https://huggingface.co/docs/hub/spaces-overview) guides to run the benchmark. Each run generates a markdown report summarizing the results.

## Submission
To submit your results, create a Pull Request in the [Community Tab](https://huggingface.co/spaces/doubao-bench/web-bench-leaderboard/discussions) to add them to the `src/custom-eval-results` folder in this repository:

* Create a new folder named after your provider and model (e.g., `ollama_mistral-small`, with an underscore separating the two).
* Each folder stores the evaluation results of only one model.
* Add a `base_meta.json` file with the following fields (see the illustrative sketch after this list):
    * **Model**: the name of your model
    * **Model Link**: the link to the model page
    * **Provider**: the name of the provider
    * **Openness**: the openness of the model (e.g., open-weights vs. proprietary)
    * **Agent**: the agent used for evaluation, `Web-Agent` or your custom agent name
* Put your generated reports (e.g. `eval-20258513-102235.zip`) in your folder.
* The title of the PR should be: `[Community Submission] Model: org/model, Username: your_username`.
* **Tip**: `gen_meta.json` will be created after our review.
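
For illustration, a submission folder might look like this (the folder and archive names are placeholders drawn from the examples above):

```
src/custom-eval-results/
└── ollama_mistral-small/
    β”œβ”€β”€ base_meta.json
    └── eval-20258513-102235.zip
```

And a minimal `base_meta.json` sketch, assuming the keys match the field names listed above (all values are placeholders, not required values):

```json
{
  "Model": "mistral-small",
  "Model Link": "https://huggingface.co/org/model",
  "Provider": "ollama",
  "Openness": "Open",
  "Agent": "Web-Agent"
}
```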

We will promptly review and merge your submission. Once the review is complete, we will publish the results on the leaderboard.
""")



# Restart the Space every 30 minutes so newly merged results are picked up.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch(share=True)