import os
import logging
import time
from functools import wraps

import gradio as gr
import datasets
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from gradio_space_ci import enable_space_ci

from src.display.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    FAQ_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    Precision,
    WeightType,
    fields,
)
from src.envs import (
    API,
    EVAL_REQUESTS_PATH,
    AGGREGATED_REPO,
    H4_TOKEN,
    QUEUE_REPO,
    REPO_ID,
)
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Spin up ephemeral Spaces on pull requests (provided by gradio_space_ci)
enable_space_ci()


def restart_space():
    """Restart this Space via the Hub API."""
    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)


def time_diff_wrapper(func):
    """Log the wall-clock time taken by each call to the wrapped function."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        diff = time.time() - start_time
        logging.info(f"Time taken for {func.__name__}: {diff} seconds")
        return result

    return wrapper
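

# Example: since download_dataset below is decorated with @time_diff_wrapper,
# each call logs a line like "Time taken for download_dataset: 3.2 seconds".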
@time_diff_wrapper
def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
    """Download a dataset snapshot with exponential-backoff retries."""
    attempt = 0
    while attempt < max_attempts:
        try:
            logging.info(f"Downloading {repo_id} to {local_dir}")
            snapshot_download(
                repo_id=repo_id,
                local_dir=local_dir,
                repo_type=repo_type,
                tqdm_class=None,
                etag_timeout=30,
                max_workers=8,
            )
            logging.info("Download successful")
            return
        except Exception as e:
            attempt += 1
            if attempt == max_attempts:
                # Out of retries: raise without a pointless final sleep
                logging.error(f"Error downloading {repo_id}: {e}")
                break
            wait_time = backoff_factor**attempt
            logging.error(f"Error downloading {repo_id}: {e}, retrying in {wait_time}s")
            time.sleep(wait_time)
    raise Exception(f"Failed to download {repo_id} after {max_attempts} attempts")
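

# With the defaults above, each failed attempt sleeps backoff_factor**attempt
# seconds before the next try: 1.5s after the first failure and 2.25s after the
# second; a third failure raises immediately.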


def init_space(full_init: bool = True):
    """Initialize the Space, downloading remote data only on a full init."""
    if full_init:
        # The evaluation request queue is only needed on a full initialization
        try:
            download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
        except Exception:
            restart_space()

    # The aggregated leaderboard results are always reloaded
    leaderboard_dataset = datasets.load_dataset(AGGREGATED_REPO, "default", split="train")
    leaderboard_df = get_leaderboard_df(
        leaderboard_dataset=leaderboard_dataset,
        cols=COLS,
        benchmark_cols=BENCHMARK_COLS,
    )

    # The evaluation queue tables are built from the local snapshot
    eval_queue_dfs = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

    return leaderboard_df, eval_queue_dfs
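

# init_space returns (leaderboard_df, (finished_df, running_df, pending_df));
# the nested tuple comes from get_evaluation_queue_df and is unpacked below.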

# LEADERBOARD_FULL_INIT defaults to True; set it to any other value to skip the
# slower queue download during local development.
do_full_init = os.getenv("LEADERBOARD_FULL_INIT", "True") == "True"

leaderboard_df, eval_queue_dfs = init_space(full_init=do_full_init)
finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = eval_queue_dfs
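

# Build the Gradio UI: the leaderboard tab, documentation tabs (About, FAQ), a
# submission form wired to add_new_eval, and a citation block.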
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = Leaderboard(
                value=leaderboard_df,
                datatype=[c.type for c in fields(AutoEvalColumn)],
                select_columns=SelectColumns(
                    default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
                    cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden or c.dummy],
                    label="Select Columns to Display:",
                ),
                search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.fullname.name, AutoEvalColumn.license.name],
                hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
                filter_columns=[
                    ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
                    ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
                    ColumnFilter(
                        AutoEvalColumn.params.name,
                        type="slider",
                        min=0.01,
                        max=150,
                        label="Select the number of parameters (B)",
                    ),
                    ColumnFilter(
                        AutoEvalColumn.still_on_hub.name, type="boolean", label="Private or deleted", default=True
                    ),
                    ColumnFilter(
                        AutoEvalColumn.merged.name, type="boolean", label="Contains a merge/moerge", default=True
                    ),
                    ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
                    ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
                ],
                bool_checkboxgroup_label="Hide models",
            )
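
        # NOTE: create_plot_df, create_scores_df, and create_metric_plot_obj are
        # imported above but unused in this file. A hedged sketch of the plots
        # tab they presumably back (tab title, id, and input DataFrame are
        # assumptions, not taken from this file):
        #
        # with gr.TabItem("📈 Metrics through time", elem_id="llm-benchmark-tab-table", id=2):
        #     plot_df = create_plot_df(create_scores_df(leaderboard_df))
        #     chart = create_metric_plot_obj(plot_df, BENCHMARK_COLS, title="Top scores over time")
        #     gr.Plot(value=chart, min_width=500)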
| with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=3): |
| gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") |
|
|
| with gr.TabItem("❗FAQ", elem_id="llm-benchmark-tab-table", id=4): |
| gr.Markdown(FAQ_TEXT, elem_classes="markdown-text") |
|
|
| with gr.TabItem("🚀 Submit ", elem_id="llm-benchmark-tab-table", id=5): |
| with gr.Column(): |
| with gr.Row(): |
| gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text") |
|
|
| with gr.Row(): |
| gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text") |
|
|
| with gr.Row(): |
| with gr.Column(): |
| model_name_textbox = gr.Textbox(label="Model name") |
| revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main") |
| model_type = gr.Dropdown( |
| choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown], |
| label="Model type", |
| multiselect=False, |
| value=ModelType.FT.to_str(" : "), |
| interactive=True, |
| ) |
|
|
                    with gr.Column():
                        precision = gr.Dropdown(
                            choices=[i.value.name for i in Precision if i != Precision.Unknown],
                            label="Precision",
                            multiselect=False,
                            value="float16",
                            interactive=True,
                        )
                        weight_type = gr.Dropdown(
                            choices=[i.value.name for i in WeightType],
                            label="Weights type",
                            multiselect=False,
                            value="Original",
                            interactive=True,
                        )
                        base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
                with gr.Column():
                    with gr.Accordion(
                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            finished_eval_table = gr.components.Dataframe(
                                value=finished_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )

                    with gr.Accordion(
                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            running_eval_table = gr.components.Dataframe(
                                value=running_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )

                    with gr.Accordion(
                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            pending_eval_table = gr.components.Dataframe(
                                value=pending_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )
                submit_button = gr.Button("Submit Eval")
                submission_result = gr.Markdown()
                # Inputs are passed positionally, so this order must match
                # add_new_eval's signature
                submit_button.click(
                    add_new_eval,
                    [
                        model_name_textbox,
                        base_model_name_textbox,
                        revision_name_textbox,
                        precision,
                        weight_type,
                        model_type,
                    ],
                    submission_result,
                )

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
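
# The BackgroundScheduler imported above presumably drives a periodic restart of
# this Space via restart_space(); a minimal sketch (the 3-hour interval is an
# assumption, not taken from this file):
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", hours=3)
scheduler.start()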

demo.queue(default_concurrency_limit=40).launch()