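"""Gradio app for this leaderboard Space.

Syncs evaluation requests and results from the Hub, builds the leaderboard
and submission tabs, and schedules a periodic restart to refresh the data.
"""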
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    ABOUT_TEXT,
    TITLE,
    Training_Dataset,
    Testing_Type,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    Precision,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
def restart_space():
    API.restart_space(repo_id=REPO_ID)

### Space initialisation
try:
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
try:
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
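
# Table data for the UI: the scored leaderboard plus the finished and pending
# submission queues shown on the submit tab.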
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
(
    finished_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
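

# One leaderboard tab per Testing_Type. The "avg" tab shows scores averaged
# across all datasets; every other tab is filtered to the rows whose
# "Testing Type" column matches the tab's name.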
def init_leaderboard(dataframe):
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    with gr.Tabs(elem_classes="leaderboard-tabs") as leaderboard_tabs:
        for testing_type in Testing_Type:
            with gr.TabItem(
                "Average Scores" if testing_type.value == "avg" else testing_type.name,
                elem_id=f"{testing_type.value}_Leaderboard",
            ):
                if testing_type.value == "avg":
                    gr.Markdown("The scores presented in this tab are averaged scores across all datasets.")
                try:
                    leaderboard = Leaderboard(
                        value=dataframe[dataframe["Testing Type"] == testing_type.name],
                        datatype=[c.type for c in fields(AutoEvalColumn)],
                        select_columns=SelectColumns(
                            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
                            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
                            label="Select Columns to Display:",
                        ),
                        search_columns=[AutoEvalColumn.model_name.name],
                        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
                        filter_columns=[
                            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
                            ColumnFilter(AutoEvalColumn.training_dataset_type.name, type="checkboxgroup", label="Training Dataset"),
                            ColumnFilter(
                                AutoEvalColumn.model_parameters.name,
                                type="slider",
                                min=0,
                                max=10000,
                                default=[0, 100],  # slider bounds must be numeric
                                label="Select the number of parameters (M)",
                            ),
                        ],
                        bool_checkboxgroup_label="Hide Models",
                        interactive=False,
                    )
                except Exception:  # no rows for this testing type yet
                    gr.Markdown("There are no submissions for this testing type yet.")
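

# Submission tab: queue-status accordions plus the form that feeds add_new_eval.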
def init_submissions():
    with gr.Column():
        with gr.Row():
            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
        with gr.Column():
            with gr.Accordion(
                f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
                open=False,
            ):
                with gr.Row():
                    finished_eval_table = gr.components.Dataframe(
                        value=finished_eval_queue_df,
                        headers=EVAL_COLS,
                        datatype=EVAL_TYPES,
                        row_count=5,
                    )
            with gr.Accordion(
                f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
                open=False,
            ):
                with gr.Row():
                    pending_eval_table = gr.components.Dataframe(
                        value=pending_eval_queue_df,
                        headers=EVAL_COLS,
                        datatype=EVAL_TYPES,
                        row_count=5,
                    )
        with gr.Row():
            gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
        with gr.Row():
            with gr.Column():
                model_name_textbox = gr.Textbox(label="Model name")
                model_link_textbox = gr.Textbox(label="Link to Model")
                model_backbone_textbox = gr.Dropdown(
                    choices=["Original"],
                    label="Model Backbone",
                    value="Original",
                    allow_custom_value=True,
                )
                model_parameter_number = gr.Number(label="Model Parameter Count (M)", precision=1, minimum=0)
                precision = gr.Dropdown(
                    choices=[i.name for i in Precision],
                    label="Precision",
                    multiselect=False,
                    value="float32",
                    interactive=True,
                )
                paper_name_textbox = gr.Textbox(label="Paper Name")
                paper_link_textbox = gr.Textbox(label="Link To Paper")
            with gr.Column():
                training_dataset = gr.Dropdown(
                    choices=[i.value for i in Training_Dataset if i.value != Training_Dataset.Other.value],
                    label="Training Dataset",
                    multiselect=False,
                    value=Training_Dataset.XCL.value,
                    interactive=True,
                    allow_custom_value=True,
                )
                testing_type = gr.Dropdown(
                    choices=[i.name for i in Testing_Type],
                    label="Tested on",
                    multiselect=False,
                    value=Testing_Type.AVG.name,
                    interactive=True,
                )
                cmap_value = gr.Number(label="cmAP Performance", precision=2, minimum=0.00, maximum=1.00, step=0.01)
                auroc_value = gr.Number(label="AUROC Performance", precision=2, minimum=0.00, maximum=1.00, step=0.01)
                t1acc_value = gr.Number(label="T1-Acc Performance", precision=2, minimum=0.00, maximum=1.00, step=0.01)
        submit_button = gr.Button("Submit Eval")
        submission_result = gr.Markdown()
        submit_button.click(
            fn=add_new_eval,
            inputs=[
                model_name_textbox,
                model_link_textbox,
                model_backbone_textbox,
                precision,
                model_parameter_number,
                paper_name_textbox,
                paper_link_textbox,
                training_dataset,
                testing_type,
                cmap_value,
                auroc_value,
                t1acc_value,
            ],
            outputs=submission_result,
        )
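

# Top-level page: title, introduction, the three tabs, and a copyable citation.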
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 Leaderboard", elem_id="leaderboard-tab-table", id=0):
            init_leaderboard(LEADERBOARD_DF)
        with gr.TabItem("📝 About", elem_id="leaderboard-tab-table", id=2):
            gr.Markdown(ABOUT_TEXT, elem_classes="markdown-text")
        with gr.TabItem("🚀 Submit here!", elem_id="leaderboard-tab-table", id=3):
            init_submissions()

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
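
# Restart the Space every 30 minutes (1800 s) so newly processed submissions
# are pulled in and the leaderboard stays current.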
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.launch()