Commit 1652e87 · Parent(s): 5d2b8e6
Create app.py
app.py ADDED
@@ -0,0 +1,249 @@
import pandas as pd
import streamlit as st
from huggingface_hub import HfApi
from utils import ascending_metrics, metric_ranges, CV11_LANGUAGES, FLEURS_LANGUAGES
import numpy as np
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
from os.path import exists
import threading
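
# utils.py is not part of this commit; a rough sketch of what app.py assumes about the
# imported names (values here are illustrative, not the real tables):
#     ascending_metrics = {"wer", "cer"}                          # metrics where lower is better
#     metric_ranges = {"wer": (0.0, 100.0), "cer": (0.0, 100.0)}  # valid (min, max) per metric
#     CV11_LANGUAGES = {"en": "English", ...}                     # Common Voice 11 config -> language name
#     FLEURS_LANGUAGES = {"en_us": "English", ...}                # FLEURS config -> language name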

st.set_page_config(layout="wide")

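# Fetch every Hub model that declares evaluation results via a model-index in its card.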
def get_model_infos():
    api = HfApi()
    model_infos = api.list_models(filter="model-index", cardData=True)
    return model_infos

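# Normalise a raw metric value from a model card: strip "%" from strings, take the first
# element of a list, and round floats to 4 decimal places; anything non-numeric (or an
# empty list) becomes None, e.g. "12.34%" -> 12.34, [0.25] -> 0.25.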
def parse_metric_value(value):
    if isinstance(value, str):
        value = "".join(value.split("%"))
        try:
            value = float(value)
        except:  # noqa: E722
            value = None
    elif isinstance(value, list):
        if len(value) > 0:
            value = value[0]
        else:
            value = None
    value = round(value, 4) if isinstance(value, float) else None
    return value

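# Walk the model-index results of a single model card and yield one leaderboard row per
# (dataset, split, config), keeping the best value seen for each metric and skipping
# results that are empty or fall outside the valid metric range.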
def parse_metrics_rows(meta, only_verified=False):
    if not isinstance(meta["model-index"], list) or len(meta["model-index"]) == 0 or "results" not in meta["model-index"][0]:
        return None
    for result in meta["model-index"][0]["results"]:
        if not isinstance(result, dict) or "dataset" not in result or "metrics" not in result or "type" not in result["dataset"]:
            continue
        dataset = result["dataset"]["type"]
        if dataset == "":
            continue
        row = {"dataset": dataset, "split": "-unspecified-", "config": "-unspecified-"}
        if "split" in result["dataset"]:
            row["split"] = result["dataset"]["split"]
        if "config" in result["dataset"]:
            row["config"] = result["dataset"]["config"]
        no_results = True
        incorrect_results = False
        for metric in result["metrics"]:
            name = metric["type"].lower().strip()

            if name in ("model_id", "dataset", "split", "config", "pipeline_tag", "only_verified"):
                # Metrics are not allowed to be named "dataset", "split", "config", "pipeline_tag"
                continue
            value = parse_metric_value(metric.get("value", None))
            if value is None:
                continue
            if name in row:
                new_metric_better = value < row[name] if name in ascending_metrics else value > row[name]
            if name not in row or new_metric_better:
                # overwrite the metric if the new value is better.

                if only_verified:
                    if "verified" in metric and metric["verified"]:
                        no_results = False
                        row[name] = value
                        if name in metric_ranges:
                            if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
                                incorrect_results = True
                else:
                    no_results = False
                    row[name] = value
                    if name in metric_ranges:
                        if value < metric_ranges[name][0] or value > metric_ranges[name][1]:
                            incorrect_results = True
        if no_results or incorrect_results:
            continue
        yield row

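# Build the leaderboard dataframe from the Hub and pickle it to cache.pkl. After the first
# run, the cached results are served immediately while a background thread refreshes them
# for the next rerun.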
@st.cache(ttl=0)
def get_data_wrapper():
    def get_data(dataframe=None, verified_dataframe=None):
        data = []
        verified_data = []
        print("getting model infos")
        model_infos = get_model_infos()
        print("got model infos")
        for model_info in model_infos:
            meta = model_info.cardData
            if meta is None:
                continue
            for row in parse_metrics_rows(meta):
                if row is None:
                    continue
                row["model_id"] = model_info.id
                row["pipeline_tag"] = model_info.pipeline_tag
                row["only_verified"] = False
                data.append(row)
            for row in parse_metrics_rows(meta, only_verified=True):
                if row is None:
                    continue
                row["model_id"] = model_info.id
                row["pipeline_tag"] = model_info.pipeline_tag
                row["only_verified"] = True
                data.append(row)
        dataframe = pd.DataFrame.from_records(data)
        dataframe.to_pickle("cache.pkl")

    if exists("cache.pkl"):
        # If we have saved the results previously, call an asynchronous process
        # to fetch the results and update the saved file. Don't make users wait
        # while we fetch the new results. Instead, display the old results for
        # now. The new results should be loaded when this method
        # is called again.
        dataframe = pd.read_pickle("cache.pkl")
        t = threading.Thread(name="get_data procs", target=get_data)
        t.start()
    else:
        # We have to make the users wait during the first startup of this app.
        get_data()
        dataframe = pd.read_pickle("cache.pkl")

    return dataframe


dataframe = get_data_wrapper()

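# Page layout: title, sidebar filters (dataset, language, split, sorting metric), then the leaderboard grid.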
st.markdown("# 🤗 Whisper Event: Final Leaderboard")

# query params are used to refine the browser URL as more options are selected
query_params = st.experimental_get_query_params()
if "first_query_params" not in st.session_state:
    st.session_state.first_query_params = query_params
first_query_params = st.session_state.first_query_params

# define the scope of the leaderboard
only_verified_results = False
task = "automatic-speech-recognition"
selectable_datasets = ["mozilla-foundation/common_voice_11_0", "google/fleurs"]
dataset_mapping = {"mozilla-foundation/common_voice_11_0": "Common Voice 11", "google/fleurs": "FLEURS"}  # get a 'pretty' name for our datasets
split = "test"
selectable_metrics = ["wer", "cer"]
default_metric = selectable_metrics[0]

# select dataset from list provided
dataset = st.sidebar.selectbox(
    "Dataset",
    selectable_datasets,
    help="Select a dataset to see the leaderboard!"
)
dataset_name = dataset_mapping[dataset]

# slice dataframe to entries of interest
dataframe = dataframe[dataframe.only_verified == only_verified_results]
dataset_df = dataframe[dataframe.dataset == dataset]
dataset_df = dataset_df[dataset_df.split == split]  # hardcoded to "test"
dataset_df = dataset_df.dropna(axis="columns", how="all")

# get potential dataset configs (languages)
selectable_configs = list(set(dataset_df["config"]))
selectable_configs.sort(key=lambda name: name.lower())

if "-unspecified-" in selectable_configs:
    selectable_configs.remove("-unspecified-")

if dataset == "mozilla-foundation/common_voice_11_0":
    selectable_configs = [config for config in selectable_configs if config in CV11_LANGUAGES]
    visual_configs = [f"{config}: {CV11_LANGUAGES[config]}" for config in selectable_configs]
elif dataset == "google/fleurs":
    selectable_configs = [config for config in selectable_configs if config in FLEURS_LANGUAGES]
    visual_configs = [f"{config}: {FLEURS_LANGUAGES[config]}" for config in selectable_configs]

config = st.sidebar.selectbox(
    "Language",
    visual_configs,
    help="Filter the results on the current leaderboard by language."
)
config, language = config.split(":")

# just for show -> we've fixed the split to "test"
split = st.sidebar.selectbox(
    "Split",
    [split],
    index=0,
    help="View the results for the `test` split for evaluation performance.",
)

# update browser URL with selections
current_query_params = {"dataset": [dataset], "config": [config], "split": split}
st.experimental_set_query_params(**current_query_params)

dataset_df = dataset_df[dataset_df.config == config]

dataset_df = dataset_df.filter(["model_id"] + (["dataset"] if dataset == "-any-" else []) + selectable_metrics)
dataset_df = dataset_df.dropna(thresh=2)  # Want at least two non-na values (one for model_id and one for a metric).

sorting_metric = st.sidebar.radio(
    "Sorting Metric",
    selectable_metrics,
    index=selectable_metrics.index(default_metric) if default_metric in selectable_metrics else 0,
    help="Select the metric to sort the leaderboard by. Click on the metric name in the leaderboard to reverse the sorting order."
)

st.markdown(
    f"This is the leaderboard for {dataset_name} {language} ({config})."
)

st.markdown(
    "Please click on the model's name to be redirected to its model card."
)

st.markdown(
    "Want to beat the leaderboard? Don't see your model here? Ensure..."
)

# Make the default metric appear right after model names and dataset names
cols = dataset_df.columns.tolist()
cols.remove(sorting_metric)
sorting_metric_index = 1 if dataset != "-any-" else 2
cols = cols[:sorting_metric_index] + [sorting_metric] + cols[sorting_metric_index:]
dataset_df = dataset_df[cols]

# Sort the leaderboard, giving the sorting metric highest priority and then ordering by other metrics in the case of equal values.
dataset_df = dataset_df.sort_values(by=cols[sorting_metric_index:], ascending=[metric in ascending_metrics for metric in cols[sorting_metric_index:]])
dataset_df = dataset_df.replace(np.nan, '-')

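# Grid behaviour: model_id cells render as links to the model page on the Hub, metric
# columns are shown to 2 decimal places, and only the selected sorting metric is
# click-sortable (and highlighted).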
# Make the leaderboard
gb = GridOptionsBuilder.from_dataframe(dataset_df)
gb.configure_default_column(sortable=False)
gb.configure_column(
    "model_id",
    cellRenderer=JsCode('''function(params) {return '<a target="_blank" href="https://huggingface.co/'+params.value+'">'+params.value+'</a>'}'''),
)

for name in selectable_metrics:
    gb.configure_column(name, type=["numericColumn", "numberColumnFilter", "customNumericFormat"], precision=2, aggFunc='sum')

gb.configure_column(
    sorting_metric,
    sortable=True,
    cellStyle=JsCode('''function(params) { return {'backgroundColor': '#FFD21E'}}''')
)

go = gb.build()
fit_columns = len(dataset_df.columns) < 10
AgGrid(dataset_df, gridOptions=go, height=28*len(dataset_df) + (35 if fit_columns else 41), allow_unsafe_jscode=True, fit_columns_on_grid_load=fit_columns, enable_enterprise_modules=False)
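
Running the added file locally presumably requires pandas, numpy, streamlit, streamlit-aggrid and huggingface_hub installed, plus the repo's utils.py alongside it; the app is then launched with "streamlit run app.py".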