secben / app.py
lthc's picture
remove height
a7219e6
import os
import gradio as gr
# from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
try:
from commutil import dbg
except:
dbg = print
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS, EVAL_TYPES, TYPES, AutoEvalColumn, ModelType, fields, WeightType, Precision
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
def restart_space():
API.restart_space(repo_id=REPO_ID)
def uncheck_all():
return [], [], [], [], [], [], []
# Searching and filtering
def update_table(
hidden_df: pd.DataFrame,
# en_shown_columns_TAD: list,
# en_shown_columns_ACM: list,
# en_shown_columns_KUT: list,
# en_shown_columns_NLP: list,
# en_shown_columns_FQA: list,
# en_shown_columns_CDP: list,
# shown_columns2: list,
shown_columns_cyberkut: list,
shown_columns_cybernlp: list,
shown_columns_cyberdsa: list,
shown_columns: list,
filter_columns_type: list,
filter_columns_precision: list,
filter_columns_size: list,
show_deleted: bool,
search_bar: str,
):
# dbg(shown_columns)
# return hidden_df
# Combine all column selections
# selected_columns = shown_columns
# selected_columns = (en_shown_columns_TAD + en_shown_columns_ACM + en_shown_columns_KUT + en_shown_columns_NLP + en_shown_columns_FQA + en_shown_columns_CDP + shown_columns2)
selected_columns = shown_columns_cyberkut + shown_columns_cybernlp + shown_columns_cyberdsa + shown_columns
# Filter models based on queries
filtered_df = filter_models(hidden_df, filter_columns_type, filter_columns_size, filter_columns_precision, show_deleted)
filtered_df = filter_queries(search_bar, filtered_df)
# print(filtered_df)
df = select_columns(filtered_df, selected_columns)
# dbg(df)
return df
def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
final_df = []
if query != "":
queries = [q.strip() for q in query.split(";")]
for _q in queries:
_q = _q.strip()
if _q != "":
temp_filtered_df = search_table(filtered_df, _q)
if len(temp_filtered_df) > 0:
final_df.append(temp_filtered_df)
if len(final_df) > 0:
filtered_df = pd.concat(final_df)
filtered_df = filtered_df.drop_duplicates(subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name])
return filtered_df
def filter_models(df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool) -> pd.DataFrame:
# return df
# Show all models
if show_deleted:
filtered_df = df
else:
filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
if "All" not in type_query:
if "?" in type_query:
filtered_df = filtered_df.loc[~df[AutoEvalColumn.model_type_symbol.name].isin([t for t in ModelType if t != "?"])]
else:
type_emoji = [t[0] for t in type_query]
filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
if "All" not in precision_query:
if "?" in precision_query:
filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isna()]
else:
filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
if "All" not in size_query:
filtered_df = df
# if "?" in size_query:
# filtered_df = filtered_df.loc[df[AutoEvalColumn.params.name].isna()]
# else:
# numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
# params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
# mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
# filtered_df = filtered_df.loc[mask]
return filtered_df
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
return df[(df[AutoEvalColumn.model.name].str.contains(query, case=False))]
def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
# return df
always_here_cols = [
AutoEvalColumn.model_type_symbol.name,
AutoEvalColumn.model.name,
]
# Ensure no duplicates and add the new average columns
unique_columns = set(always_here_cols + columns)
# We use COLS to maintain sorting
filtered_df = df[[c for c in COLS if c in df.columns and c in unique_columns]]
# Debugging print to see if the new columns are included
# print(f"Columns included in DataFrame: {filtered_df.columns.tolist()}")
# dbg(filtered_df)
return filtered_df
## Space initialisation
if not os.getenv("ENVIRONMENT"):
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN)
except Exception:
restart_space()
try:
print(EVAL_RESULTS_PATH)
snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN)
except Exception:
restart_space()
# LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
# LEADERBOARD_DF.rename(columns={
# '4ใ€malicious_URL_acc': 'mali',
# })
LEADERBOARD_DF_CP = LEADERBOARD_DF.copy()
# dbg(LEADERBOARD_DF)
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
def init_leaderboard():
pass
# if dataframe is None or dataframe.empty:
# raise ValueError("Leaderboard DataFrame is empty or None.")
# print(dataframe)
# return Leaderboard(
# value=dataframe,
# datatype=[c.type for c in fields(AutoEvalColumn)],
# select_columns=SelectColumns(
# default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
# cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
# label="Select Columns to Display:",
# ),
# search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
# hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
# filter_columns=[
# ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
# ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
# ColumnFilter(
# AutoEvalColumn.params.name,
# type="slider",
# min=0.01,
# max=150,
# label="Select the number of parameters (B)",
# ),
# # ColumnFilter(
# # AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
# # ),
# ],
# bool_checkboxgroup_label="Hide models",
# interactive=False,
# )
with gr.Column():
# with gr.Row():
# with gr.Column():
search_bar = gr.Textbox(
placeholder=" ๐Ÿ” Search for your model (separate multiple queries with `;`) and press ENTER...",
show_label=False,
elem_id="search-bar",
)
# with gr.Column(min_width=320):
# with gr.Box(elem_id="box-filter"):
with gr.Row():
filter_columns_type = gr.CheckboxGroup(
label="Model types",
choices=["All"] + [t.to_str() for t in ModelType],
value=["All"],
interactive=True,
elem_id="filter-columns-type",
)
filter_columns_precision = gr.CheckboxGroup(
label="Precision",
choices=["All"] + [i.value.name for i in Precision],
value=["All"],
interactive=True,
elem_id="filter-columns-precision",
)
# with gr.Row():
# deleted_models_visibility = gr.Checkbox(
# value=True, label="Show gated/private/deleted models", interactive=True
# )
# with gr.Row():
# with gr.Accordion("Select columns to show"):
# with gr.Tab("Other"):
with gr.Accordion("Select columns to show"):
choice_list = ["Overall", "KUT", "NLP", "DSA", "CLS", "GEN", "REA"]
shown_columns = gr.CheckboxGroup(
choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in choice_list)],
value=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in choice_list)],
# choices=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden],
# value=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden],
label="Select Columns to Display:",
interactive=True,
)
# with gr.Tab("CyberTAD"):
# datasets = ["4", "6"]
# en_shown_columns_TAD = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# with gr.Tab("CyberACM"):
# datasets = ["1", "2"]
# en_shown_columns_ACM = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# with gr.Tab("CyberKUT"):
# datasets = ["8"]
# en_shown_columns_KUT = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# with gr.Tab("CyberNLP"):
# datasets = ["9", "3"]
# en_shown_columns_NLP = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# with gr.Tab("CyberFQA"):
# datasets = ["11", "13"]
# en_shown_columns_FQA = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# with gr.Tab("CyberCDP"):
# datasets = ["10"]
# en_shown_columns_CDP = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"] and any(x in c.name for x in datasets)],
# interactive=True
# )
# dbg(fields(AutoEvalColumn))
# dbg([c.name for c in fields(AutoEvalColumn)])
# dbg([c.name for c in fields(AutoEvalColumn) if any(x in c.name for x in ["Averge"])])
with gr.Tab("CyberKUT"):
datasets = ["NetQA", "Embed", "Metric", "CodeQA"]
shown_columns_cyberkut = gr.CheckboxGroup(choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)], value=[], interactive=True)
with gr.Tab("CyberNLP"):
datasets = ["Corpus", "CDTier", "NER", "HackerNews"]
shown_columns_cybernlp = gr.CheckboxGroup(choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)], value=[], interactive=True)
with gr.Tab("CyberDSA"):
datasets = ["MaliURLs", "CSIC2010", "BETH", "MITRE"]
shown_columns_cyberdsa = gr.CheckboxGroup(choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"] and any(x in c.name for x in datasets)], value=[], interactive=True)
# with gr.Accordion("[zh] Select columns to show"):
# shown_columns2 = gr.CheckboxGroup(
# choices=[c.name for c in fields(AutoEvalColumn) if c.name not in ["Model", "T"]],
# value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and c.name not in ["Model", "T"]],
# # choices=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden],
# # value=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden],
# label="Select Columns to Display:",
# interactive=True,
# )
# shown_columns = [en_shown_columns_TAD, en_shown_columns_ACM, en_shown_columns_KUT, en_shown_columns_NLP, en_shown_columns_FQA, en_shown_columns_CDP, shown_columns2]
shown_columns = [shown_columns_cyberkut, shown_columns_cybernlp, shown_columns_cyberdsa, shown_columns]
# dbg(shown_columns, "before")
# with gr.Row():
uncheck_all_button = gr.Button("Uncheck All")
uncheck_all_button.click(
uncheck_all,
inputs=[],
outputs=[
*shown_columns,
],
)
# with gr.Row():
deleted_models_visibility = gr.Checkbox(value=True, label="Show gated/private/deleted models", interactive=True, visible=False)
filter_columns_size = gr.CheckboxGroup(
label="Model sizes (in billions of parameters)",
# choices=["All"] + list(NUMERIC_INTERVALS.keys()) + ["?"],
choices=["All"] + ["?"],
value=["All"],
interactive=True,
elem_id="filter-columns-size",
visible=False,
)
# # TODO ๆทปๅŠ ๆ–ฐๅˆ—็š„้กบๅบ
# leaderboard_table = gr.Dataframe(
# value=LEADERBOARD_DF_CP[[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + ["Averge KUTโฌ†๏ธ", "Averge NLPโฌ†๏ธ", "Averge DSAโฌ†๏ธ"] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden]],
# headers=[[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + ["Averge KUTโฌ†๏ธ", "Averge NLPโฌ†๏ธ", "Averge DSAโฌ†๏ธ"] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden]],
# datatype=TYPES,
# elem_id="leaderboard-table",
# interactive=False,
# visible=True,
# )
# dbg([(c.name, c.never_hidden) for c in fields(AutoEvalColumn)])
# dbg([c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden])
leaderboard_table = gr.Dataframe(
value=LEADERBOARD_DF_CP[[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden]],
headers=[[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.never_hidden]],
datatype=TYPES,
elem_id="leaderboard-table",
interactive=False,
visible=True,
# height=800,
# min_width=120,
)
# dbg(len(LEADERBOARD_DF))
# print(f"[DEBUG]---------- COLS :", COLS)
# print(f"[DEBUG]---------- TYPES :", TYPES)
# Dummy leaderboard for handling the case when the user uses backspace key
hidden_leaderboard_table_for_search = gr.Dataframe(
value=LEADERBOARD_DF[COLS],
headers=COLS,
datatype=TYPES,
visible=False,
)
search_bar.submit(
update_table,
inputs=[
hidden_leaderboard_table_for_search,
*shown_columns,
filter_columns_type,
filter_columns_precision,
filter_columns_size,
deleted_models_visibility,
search_bar,
],
outputs=leaderboard_table,
queue=True,
)
for selector in [*shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility, filter_columns_size]:
selector.change(
update_table,
inputs=[
hidden_leaderboard_table_for_search,
*shown_columns,
filter_columns_type,
filter_columns_precision,
filter_columns_size,
deleted_models_visibility,
search_bar,
],
outputs=leaderboard_table,
queue=True,
)
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("๐Ÿ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
init_leaderboard()
with gr.TabItem("๐Ÿ“ About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("๐Ÿš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"โœ… Finished Evaluations ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"๐Ÿ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"โณ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Row():
gr.Markdown("# โœ‰๏ธโœจ Submit your model here!", elem_classes="markdown-text")
with gr.Row():
with gr.Column():
model_name_textbox = gr.Textbox(label="Model name")
revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
model_type = gr.Dropdown(
choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
label="Model type",
multiselect=False,
value=None,
interactive=True,
)
with gr.Column():
precision = gr.Dropdown(
choices=[i.value.name for i in Precision if i != Precision.Unknown],
label="Precision",
multiselect=False,
value="float16",
interactive=True,
)
weight_type = gr.Dropdown(
choices=[i.value.name for i in WeightType],
label="Weights type",
multiselect=False,
value="Original",
interactive=True,
)
base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
submit_button.click(
add_new_eval,
[
model_name_textbox,
base_model_name_textbox,
revision_name_textbox,
precision,
weight_type,
model_type,
],
submission_result,
)
with gr.Row():
with gr.Accordion("๐Ÿ“™ Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()