import os

import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    CLS_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_leaderboard_df
from src.submission.submit import process_submission
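

# Restart this Space via the Hub API; called as a recovery path when the
# startup downloads below fail, and periodically by the background scheduler
# at the bottom of the file.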
def restart_space():
    API.restart_space(repo_id=REPO_ID)


### Space initialisation
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN,
        ignore_patterns=["*.csv"],
    )
except Exception:
    restart_space()
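
# Mirror the results dataset the same way; restarting on failure lets the
# Space retry instead of launching with stale or missing state.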
try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN,
        ignore_patterns=["*.csv"],
    )
except Exception:
    restart_space()
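
# Ensure the local directories exist even if the snapshots came back empty.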
os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)

LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
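

# Build the Leaderboard component. Without data we fall back to an empty frame
# that still carries the expected column names; with data we also wire up the
# per-column datatypes, search, and hidden bookkeeping columns.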
def init_leaderboard(dataframe):
    if dataframe is None or dataframe.empty:
        print("Initializing empty leaderboard")
        return Leaderboard(
            value=pd.DataFrame(columns=[c.name for c in fields(AutoEvalColumn)]),
            search_columns=["Model Name"],
            interactive=True,
        )
    else:
        print("Initializing leaderboard with data")
        return Leaderboard(
            value=dataframe,
            datatype=[c.type for c in fields(AutoEvalColumn)],
            search_columns=["Model Name"],
            hide_columns=["Student ID", "eval_name"],
            interactive=False,
        )
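

# Assemble the UI: a leaderboard tab, an about tab, and a submission form.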
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
| with gr.TabItem("π Performance Benchmark", elem_id="benchmark-tab-table", id=0): | |
| leaderboard = init_leaderboard(LEADERBOARD_DF) | |
| with gr.TabItem("π About", elem_id="benchmark-tab-table", id=2): | |
| gr.Markdown(CLS_BENCHMARKS_TEXT, elem_classes="markdown-text") | |
| with gr.TabItem("π Submit here! ", elem_id="benchmark-tab-table", id=3): | |
            with gr.Column():
                gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
                gr.Markdown("## Submit Your Results")

                with gr.Row():
                    student_id = gr.Textbox(label="Student ID", value="455")
                    model_name = gr.Textbox(label="Model Name", value="pixelCNN++")

                csv_upload = gr.UploadButton(
                    label="Upload Predictions CSV",
                    file_types=[".csv"],
                    file_count="single",
                )
                submit_button = gr.Button("Submit Results")
                submission_result = gr.Markdown()
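
                # Judging from this wiring, process_submission is expected to
                # take (student_id, model_name, uploaded_csv) and return a
                # markdown status string rendered into submission_result.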
                submit_button.click(
                    process_submission,
                    inputs=[student_id, model_name, csv_upload],
                    outputs=submission_result,
                )
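
# Restart the Space every 30 minutes so it re-downloads the queue and results
# datasets and picks up newly submitted entries.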
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()