# paza-bench / app.py
import os
from typing import Any
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_LANGUAGE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
LLM_BENCHMARKS_DATASETS_TEXT,
get_dataset_group_label,
)
from src.constants import (
ASR_DISPLAY_COLUMNS,
FILTER_COLUMN_ORDER,
FILTER_PARAM_MAP,
INTERPRETATIONS,
METRIC_CONFIGS,
)
from src.display.css_html_js import (
custom_css,
fix_aria_hidden_focus_js,
light_mode_head,
chart_gallery_html,
section_divider_html,
footer_html,
)
from src.display.utils import EVAL_COLS
from src.language_metadata import get_all_regions
from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, REPO_ID, TOKEN, HF_ENABLED
from src.populate import get_evaluation_queue_df
from src.submission.submit import add_language_eval_request
from src.visualizations import (
create_model_leaderboard,
create_cer_leaderboard,
create_speed_accuracy_scatter,
create_wer_cer_correlation,
create_language_coverage_chart,
create_language_location_map,
get_language_sample_info,
get_all_languages,
)
from src.data_processing import (
load_asr_results,
get_filter_options,
filter_asr_dataframe,
compute_metric_tables,
compute_metric_dataframes,
strip_dataset_labels,
VIEW_MODE_COLUMNS,
DEFAULT_VIEW_MODE,
)
from src.telemetry import (
track_event,
track_filter_change,
track_visualization_view,
track_error,
)
# App-specific constants
SORT_DIRECTION_CHOICES = ["Ascending", "Descending"]
DEFAULT_SORT_DIRECTION = "Ascending"
def _sorted_column_values(frame: pd.DataFrame, column: str) -> list[str]:
"""Get sorted unique values from a column."""
if column not in frame.columns or frame.empty:
return []
values = sorted({value for value in frame[column].dropna().unique() if value != "Unknown"})
if (frame[column] == "Unknown").any():
values.append("Unknown")
return values
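# Example with hypothetical data: a column holding ["Swahili", "Unknown", "Amharic", "Swahili"]
# yields ["Amharic", "Swahili", "Unknown"] from _sorted_column_values -- "Unknown" always sorts last.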
def update_metric_tables(models, languages, dataset_groups, view_mode, african_regions=None):
"""Update HTML metric tables based on current filters."""
clean_dataset_groups = strip_dataset_labels(dataset_groups)
tables = compute_metric_tables(
models or [],
languages or [],
clean_dataset_groups,
view_mode,
african_regions=african_regions or [],
asr_results_df=ASR_RESULTS_DF,
)
return [tables.get("cer", ""), tables.get("wer", ""), tables.get("rtfx", "")]
def update_metric_dataframes(models, languages, dataset_groups, view_mode, african_regions=None):
"""Update metric dataframes for interactive sorting."""
clean_dataset_groups = strip_dataset_labels(dataset_groups)
dataframes = compute_metric_dataframes(
models or [],
languages or [],
clean_dataset_groups,
view_mode,
african_regions=african_regions or [],
asr_results_df=ASR_RESULTS_DF,
)
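    # Return order (CER, WER, RTFx) must match the (cer_table, wer_table, rtfx_table)
    # outputs wired to the "Update dashboard" button below.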
return [
dataframes.get("cer", pd.DataFrame()),
dataframes.get("wer", pd.DataFrame()),
dataframes.get("rtfx", pd.DataFrame())
]
def refresh_filter_dropdowns(
models: list[str] | None,
languages: list[str] | None,
dataset_groups: list[str] | None,
african_regions: list[str] | None = None,
):
"""Refresh filter dropdown options based on current selections."""
if ASR_RESULTS_DF.empty:
empty_update = gr.update(choices=[], value=None)
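        # One empty update per filter column, plus one for the African-region dropdown appended at the end.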
return [empty_update] * (len(FILTER_COLUMN_ORDER) + 1)
clean_dataset_groups = strip_dataset_labels(dataset_groups) if dataset_groups else []
selections = {
"models": list(models or []),
"languages": list(languages or []),
"dataset_groups": clean_dataset_groups,
}
updates: list[Any] = []
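    # Faceted filtering: rebuild each dropdown's choices with that dropdown's own
    # selection cleared, so options narrow based on the *other* active filters
    # without hiding the user's current picks.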
for column in FILTER_COLUMN_ORDER:
param = FILTER_PARAM_MAP[column]
filter_kwargs = {key: list(value) for key, value in selections.items()}
filter_kwargs[param] = []
filtered = filter_asr_dataframe(
ASR_RESULTS_DF,
models=filter_kwargs["models"],
languages=filter_kwargs["languages"],
dataset_groups=filter_kwargs["dataset_groups"],
african_regions=african_regions,
)
options = _sorted_column_values(filtered, column)
current_selection = [value for value in selections[param] if value in options]
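        # Dataset groups are displayed with human-readable labels; raw values are
        # mapped through get_dataset_group_label on the way out.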
if column == "dataset_group":
labeled_options = [get_dataset_group_label(dg) for dg in options]
labeled_selection = [get_dataset_group_label(dg) for dg in current_selection] if current_selection else None
updates.append(gr.update(choices=labeled_options, value=labeled_selection))
else:
updates.append(gr.update(choices=options, value=current_selection or None))
updates.append(gr.update(choices=get_all_regions(), value=african_regions or None))
return updates
# Initialize data
try:
ASR_RESULTS_DF = load_asr_results()
ASR_RESULTS_ERROR: str | None = None
except FileNotFoundError as err:
ASR_RESULTS_DF = pd.DataFrame(columns=ASR_DISPLAY_COLUMNS)
ASR_RESULTS_ERROR = str(err)
track_error(err, {"source": "results_csv"}, context="data_load")
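# With no results loaded, fall back to empty filter options so the UI can still render.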
ASR_FILTER_OPTIONS = get_filter_options(ASR_RESULTS_DF) if ASR_RESULTS_ERROR is None else {
"model": [],
"language": [],
"dataset_group": [],
"dataset_group_labels": [],
"african_region": [],
}
if ASR_RESULTS_ERROR is None:
ASR_INITIAL_TABLES = compute_metric_tables([], [], [], DEFAULT_VIEW_MODE, asr_results_df=ASR_RESULTS_DF)
ASR_INITIAL_DATAFRAMES = compute_metric_dataframes([], [], [], DEFAULT_VIEW_MODE, asr_results_df=ASR_RESULTS_DF)
else:
ASR_INITIAL_TABLES = {metric: f"<p class='metric-table-empty'>⚠️ {ASR_RESULTS_ERROR}</p>" for metric in METRIC_CONFIGS}
ASR_INITIAL_DATAFRAMES = {metric: pd.DataFrame() for metric in METRIC_CONFIGS}
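# Evaluation request queue, split by status (finished / running / pending).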
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
def restart_space():
"""Restart the HuggingFace Space (only if HF is enabled)."""
if HF_ENABLED and API:
try:
API.restart_space(repo_id=REPO_ID)
except Exception as e:
print(f"Could not restart space: {e}")
else:
print("HuggingFace Hub disabled, skipping space restart")
### Space initialisation
if HF_ENABLED:
try:
print(f"Downloading requests from {QUEUE_REPO}...")
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
except Exception as e:
print(f"Could not download requests: {e}")
# restart_space()
else:
print("HuggingFace Hub disabled, skipping dataset downloads")
demo = gr.Blocks(css=custom_css, head=light_mode_head)
with demo:
# Inject accessibility fix for aria-hidden focusable elements (WCAG 4.1.2)
gr.HTML(fix_aria_hidden_focus_js)
gr.Image(
value="src/assets/PazaBench-Banner2.png",
show_label=False,
container=False,
elem_id="banner-image",
)
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("🎙️ ASR Metrics", elem_id="asr-results-tab", id=0):
if ASR_RESULTS_ERROR:
gr.Markdown(f"⚠️ {ASR_RESULTS_ERROR}")
else:
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
gr.Markdown(
"### Compare models at a glance. Use the filters below to customize your view "
"or explore the data directly in the tables.",
elem_classes="explainer-text"
)
# Filter & Customize accordion moved to top (collapsed by default)
with gr.Accordion("🔧 Filter and customise", open=False, elem_id="filter-customize-accordion"):
gr.Markdown(
"Customize the dashboard by filtering models, languages, regions, or datasets. "
"Choose whether columns show model families or individual checkpoints."
)
with gr.Row():
model_filter = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["model"],
multiselect=True,
label="Model",
value=None,
allow_custom_value=False,
)
language_filter = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["language"],
multiselect=True,
label="Language",
value=None,
allow_custom_value=False,
)
dataset_filter = gr.Dropdown(
choices=ASR_FILTER_OPTIONS.get("dataset_group_labels", ASR_FILTER_OPTIONS["dataset_group"]),
multiselect=True,
label="Dataset group",
value=None,
allow_custom_value=False,
)
gr.Markdown("#### 🌍 Filter by Region", elem_classes="markdown-text")
with gr.Row():
african_region_filter = gr.Dropdown(
choices=ASR_FILTER_OPTIONS.get("african_region", []),
multiselect=True,
label="African Region",
value=None,
allow_custom_value=False,
info="Filter languages by geographic region",
)
view_mode_radio = gr.Radio(
choices=list(VIEW_MODE_COLUMNS.keys()),
value=DEFAULT_VIEW_MODE,
label="Columns show",
interactive=True,
)
apply_filters_button = gr.Button("Update dashboard", variant="primary")
gr.Markdown(
"💡 **Leaderboard Guide:** Model families are **ranked from left to right by the average performance across languages**. *Click on any column header* to sort by column, *click on a language* to sort that row's models by score, *hover over truncated names* for full text.",
elem_classes="markdown-text"
)
with gr.Tabs(elem_classes="metric-tabs"):
with gr.TabItem("CER ⬇️"):
gr.Markdown("📉 **Lower is better** — Character Error Rate measures the percentage of characters incorrectly transcribed. This is especially important for languages with rich word forms, where meaning is built by combining word parts, therefore errors at the character level can significantly impact meaning.<br><br>*See 📊 Visualizations tab for CER Performance chart.*", elem_classes="metric-note")
cer_table = gr.Dataframe(
value=ASR_INITIAL_DATAFRAMES.get("cer", pd.DataFrame()),
elem_id="cer-leaderboard-table",
interactive=False,
wrap=False,
max_height=800,
)
with gr.TabItem("WER ⬇️"):
gr.Markdown("📉 **Lower is better** — Word Error Rate measures the percentage of words incorrectly transcribed.<br><br>*See 📊 Visualizations tab for WER Performance chart.*", elem_classes="metric-note")
wer_table = gr.Dataframe(
value=ASR_INITIAL_DATAFRAMES.get("wer", pd.DataFrame()),
elem_id="wer-leaderboard-table",
interactive=False,
wrap=False,
max_height=800,
)
with gr.TabItem("RTFx ⬆️"):
gr.Markdown("📈 **Higher is better** — Inverse Real-Time Factor (RTFx) measures how fast transcription runs relative to real-time audio duration.<br><br>*See 📊 Visualizations tab for Speed vs Accuracy chart.*", elem_classes="metric-note")
rtfx_table = gr.Dataframe(
value=ASR_INITIAL_DATAFRAMES.get("rtfx", pd.DataFrame()),
elem_id="rtfx-leaderboard-table",
interactive=False,
wrap=False,
max_height=800,
)
def update_metric_dataframes_with_telemetry(models, languages, dataset_groups, view_mode, african_regions):
"""Wrapper that adds telemetry tracking to filter updates."""
track_event("dashboard_update", {
# Counts for quick filtering
"models_count": len(models) if models else 0,
"languages_count": len(languages) if languages else 0,
"datasets_count": len(dataset_groups) if dataset_groups else 0,
"regions_count": len(african_regions) if african_regions else 0,
# Actual filter values (limit to prevent huge payloads)
"models": ",".join(models[:10]) if models else "",
"languages": ",".join(languages[:10]) if languages else "",
"dataset_groups": ",".join(dataset_groups[:10]) if dataset_groups else "",
"african_regions": ",".join(african_regions[:10]) if african_regions else "",
"view_mode": view_mode or "",
"tab": "asr_metrics",
})
return update_metric_dataframes(models, languages, dataset_groups, view_mode, african_regions)
apply_filters_button.click(
update_metric_dataframes_with_telemetry,
inputs=[
model_filter,
language_filter,
dataset_filter,
view_mode_radio,
african_region_filter,
],
outputs=[cer_table, wer_table, rtfx_table],
)
with gr.TabItem("📊 Visualizations", elem_id="visualizations-tab", id=1):
if ASR_RESULTS_ERROR:
gr.Markdown(f"⚠️ {ASR_RESULTS_ERROR}")
else:
gr.Markdown(
"### Explore key visualizations revealing insights about model performance, "
"speed-accuracy tradeoffs, and cross-metric correlations across the benchmark.",
elem_classes="markdown-text"
)
# Chart Gallery - Quick Navigation
gr.Markdown("### 📋 Available Charts", elem_classes="markdown-text")
gr.HTML(chart_gallery_html)
# Plot 1: CER Performance Leaderboard (with language filter)
gr.HTML("<div id='cer-leaderboard-section'></div>")
gr.Markdown("## 🏆 CER Performance Leaderboard", elem_classes="markdown-text")
gr.Markdown("*Shows model families by default. Select language(s) above to see top 15 individual models. CER is the preferred metric for low-resource and agglutinative languages.*", elem_classes="markdown-text")
with gr.Accordion("ℹ️ How to interpret this chart", open=False):
gr.Markdown(INTERPRETATIONS['cer_leaderboard'])
viz_language_filter_cer_leaderboard = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["language"],
multiselect=True,
label="Filter by Language",
value=None,
allow_custom_value=False,
)
plot_cer_leaderboard = gr.Plot(value=create_cer_leaderboard(ASR_RESULTS_DF))
gr.Markdown("💡 *For the best experience, view these charts on a desktop or tablet in landscape mode.*", elem_classes="markdown-text")
# Plot 2: WER Performance Leaderboard (with language filter)
gr.HTML("<div id='leaderboard-section'></div>")
gr.Markdown("## 📊 WER Performance Leaderboard", elem_classes="markdown-text")
gr.Markdown("*Shows model families by default. Select language(s) above to see top 15 individual models.*", elem_classes="markdown-text")
with gr.Accordion("ℹ️ How to interpret this chart", open=False):
gr.Markdown(INTERPRETATIONS['leaderboard'])
viz_language_filter_leaderboard = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["language"],
multiselect=True,
label="Filter by Language",
value=None,
allow_custom_value=False,
)
plot_leaderboard = gr.Plot(value=create_model_leaderboard(ASR_RESULTS_DF))
gr.Markdown("💡 *For the best experience, view these charts on a desktop or tablet in landscape mode.*", elem_classes="markdown-text")
# Plot 3: Speed vs Accuracy Tradeoff (Individual Models only)
gr.HTML("<div id='speed-accuracy-section'></div>")
gr.Markdown("## 🚀 Speed versus Accuracy Tradeoffs", elem_classes="markdown-text")
with gr.Accordion("ℹ️ How to interpret this chart", open=False):
gr.Markdown(INTERPRETATIONS['speed_accuracy'])
viz_language_filter_speed = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["language"],
multiselect=True,
label="Filter by Language",
value=None,
allow_custom_value=False,
)
plot_speed_accuracy = gr.Plot(value=create_speed_accuracy_scatter(ASR_RESULTS_DF, view_mode="individual_model"))
gr.Markdown("💡 *For the best experience, view these charts on a desktop or tablet in landscape mode.*", elem_classes="markdown-text")
# Plot 4: CER vs WER Correlation (with language filter)
gr.HTML("<div id='correlation-section'></div>")
gr.Markdown("## 🔍 CER versus WER Correlation", elem_classes="markdown-text")
with gr.Accordion("ℹ️ How to interpret this chart", open=False):
gr.Markdown(INTERPRETATIONS['correlation'])
with gr.Row():
viz_language_filter_correlation = gr.Dropdown(
choices=ASR_FILTER_OPTIONS["language"],
multiselect=True,
label="Filter by Language",
value=["Swahili"],
allow_custom_value=False,
scale=3,
)
top_n_models_slider = gr.Slider(
minimum=0,
maximum=20,
step=1,
value=0,
label="Top N Models (0 = all)",
scale=1,
)
plot_correlation = gr.Plot(value=create_wer_cer_correlation(ASR_RESULTS_DF, languages=["Swahili"]))
gr.Markdown("💡 *For the best experience, view these charts on a desktop or tablet in landscape mode.*", elem_classes="markdown-text")
# Visualization update functions with telemetry (v2 - consolidated tracking)
def update_wer_leaderboard_with_telemetry(langs):
"""Update WER leaderboard with consolidated telemetry tracking."""
# Consolidated: languages included in main event (no separate track_language_interest calls)
track_visualization_view("wer_leaderboard", languages=langs if langs else None)
return create_model_leaderboard(ASR_RESULTS_DF, languages=langs if langs else None)
def update_cer_leaderboard_with_telemetry(langs):
"""Update CER leaderboard with consolidated telemetry tracking."""
# Consolidated: languages included in main event
track_visualization_view("cer_leaderboard", languages=langs if langs else None)
return create_cer_leaderboard(ASR_RESULTS_DF, languages=langs if langs else None)
def update_speed_accuracy_with_telemetry(langs):
"""Update speed-accuracy chart with consolidated telemetry tracking."""
# Consolidated: languages included in main event
track_visualization_view("speed_accuracy", languages=langs if langs else None)
return create_speed_accuracy_scatter(ASR_RESULTS_DF, view_mode="individual_model", languages=langs if langs else None)
def update_correlation_with_telemetry(langs, top_n):
"""Update correlation chart with consolidated telemetry tracking."""
# Consolidated: languages and properties in single event
track_visualization_view(
"wer_cer_correlation",
properties={"top_n_models": str(int(top_n)) if top_n else "all"},
languages=langs if langs else None
)
return create_wer_cer_correlation(
ASR_RESULTS_DF,
languages=langs if langs else None,
top_n_models=int(top_n) if top_n else None
)
# Update plots when language filters change
viz_language_filter_leaderboard.change(
fn=update_wer_leaderboard_with_telemetry,
inputs=[viz_language_filter_leaderboard],
outputs=[plot_leaderboard]
)
viz_language_filter_cer_leaderboard.change(
fn=update_cer_leaderboard_with_telemetry,
inputs=[viz_language_filter_cer_leaderboard],
outputs=[plot_cer_leaderboard]
)
viz_language_filter_speed.change(
fn=update_speed_accuracy_with_telemetry,
inputs=[viz_language_filter_speed],
outputs=[plot_speed_accuracy]
)
# Update correlation chart when language or top N changes
viz_language_filter_correlation.change(
fn=update_correlation_with_telemetry,
inputs=[viz_language_filter_correlation, top_n_models_slider],
outputs=[plot_correlation]
)
top_n_models_slider.change(
fn=update_correlation_with_telemetry,
inputs=[viz_language_filter_correlation, top_n_models_slider],
outputs=[plot_correlation]
)
with gr.TabItem("📝 Benchmark Inputs", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
gr.Markdown("---", elem_classes="markdown-text")
gr.Markdown("## 🗺️ Language Coverage", elem_classes="markdown-text")
gr.Markdown(
"Explore where each language in PazaBench is spoken across Africa and see the sample distribution by language.",
elem_classes="markdown-text"
)
# Row 1: Language selector map and PazaBench overview with sample distribution
gr.Markdown("#### 🔍 Explore by Language", elem_classes="markdown-text")
with gr.Row():
with gr.Column(scale=1):
benchmark_language_filter = gr.Dropdown(
choices=get_all_languages(),
value=None,
label="Select a Language",
multiselect=True,
allow_custom_value=False,
info="Select languages to see where they are spoken"
)
language_location_map = gr.Plot(value=create_language_location_map())
gr.Markdown("💡 *Select a language to see the countries where it is spoken.*", elem_classes="markdown-text")
with gr.Column(scale=2):
language_sample_info = gr.HTML(
value=get_language_sample_info(None, ASR_RESULTS_DF),
)
# Sample Distribution by Language (stacked under PazaBench overview)
gr.Markdown("#### 📊 Sample Distribution by Language", elem_classes="markdown-text")
language_coverage_plot = gr.Plot(value=create_language_coverage_chart())
# Event handlers for language selector with telemetry
def update_benchmark_language_views(langs):
"""Update all language-related views at once to avoid infinite loops."""
# Track telemetry (v2 - consolidated tracking, no separate language_interest calls)
if langs:
track_filter_change("language", langs, tab="benchmark_inputs")
# Consolidated: languages in main event (no separate track_language_interest loops)
track_visualization_view("language_location_map", languages=langs)
track_visualization_view("language_coverage", languages=langs)
# Return all three outputs at once
return (
create_language_location_map(langs if langs else None),
get_language_sample_info(langs if langs else None, ASR_RESULTS_DF),
create_language_coverage_chart(langs if langs else None),
)
benchmark_language_filter.change(
fn=update_benchmark_language_views,
inputs=[benchmark_language_filter],
outputs=[language_location_map, language_sample_info, language_coverage_plot]
)
gr.Markdown(LLM_BENCHMARKS_DATASETS_TEXT, elem_classes="markdown-text")
with gr.TabItem("➕ Request Language Eval", elem_id="lang-eval-tab", id=3):
gr.Markdown(EVALUATION_LANGUAGE_TEXT, elem_classes="markdown-text")
with gr.Row():
dataset_location = gr.Radio(
choices=["HuggingFace", "Other"],
value="HuggingFace",
label="Dataset Location *",
info="Select where your dataset is hosted"
)
with gr.Row():
with gr.Column():
lang_dataset_name = gr.Textbox(
label="Dataset Name *",
placeholder="e.g., username/dataset-name",
info="Full Hugging Face dataset ID",
visible=True
)
lang_dataset_url = gr.Textbox(
label="Dataset URL *",
placeholder="https://example.com/dataset",
info="Full URL to the publicly available dataset",
visible=False
)
lang_dataset_config = gr.Textbox(
label="Dataset Subset/Language",
placeholder="default",
info="Dataset subset or language configuration (leave empty for default)"
)
lang_dataset_split = gr.Textbox(
label="Dataset Split",
placeholder="test",
info="Split to evaluate (default: test)"
)
with gr.Column():
lang_license = gr.Textbox(
label="License *",
placeholder="e.g., CC-BY-4.0, MIT, Apache-2.0",
info="License of the dataset (required)"
)
lang_audio_column = gr.Textbox(
label="Audio Column",
placeholder="audio",
info="Name of the column containing audio data (optional, default: audio)"
)
lang_text_column = gr.Textbox(
label="Text/Transcript Column",
placeholder="text",
info="Name of the column containing transcriptions (optional, default: text)"
)
def toggle_dataset_inputs(location):
"""Toggle visibility of dataset name vs URL based on location selection."""
if location == "HuggingFace":
return gr.update(visible=True), gr.update(visible=False)
else:
return gr.update(visible=False), gr.update(visible=True)
dataset_location.change(
toggle_dataset_inputs,
inputs=[dataset_location],
outputs=[lang_dataset_name, lang_dataset_url]
)
lang_submit_button = gr.Button("Submit Language Evaluation Request", variant="primary")
lang_submission_result = gr.Markdown()
            def submit_language_eval_with_telemetry(
                location, dataset_name, dataset_url, dataset_config, dataset_split,
                audio_column, text_column, license_value
            ):
                """Wrapper that adds telemetry to submission attempts and resets the form on success."""
                from src.telemetry import track_submission_attempt
                result = add_language_eval_request(
                    location, dataset_name, dataset_url, dataset_config, dataset_split,
                    audio_column, text_column, license_value
                )
                # Heuristic: infer success from the result message text.
                success = bool(result) and ("success" in result.lower() or "submitted" in result.lower())
                error_msg = None if success else result
# Use dataset name or URL based on location
ds_identifier = dataset_name if location == "HuggingFace" else dataset_url
track_submission_attempt(
dataset_name=ds_identifier or "unknown",
success=success,
error_message=error_msg
)
                # On success, reset the form fields and restore HuggingFace-mode
                # visibility; on error, keep values so the user can fix and retry.
                if success:
                    return (
                        result,
                        "HuggingFace",
                        gr.update(value="", visible=True),   # dataset name (shown for HuggingFace)
                        gr.update(value="", visible=False),  # dataset URL (hidden for HuggingFace)
                        "", "", "", "", "",
                    )
                return (result,) + tuple(gr.update() for _ in range(8))
lang_submit_button.click(
submit_language_eval_with_telemetry,
[
dataset_location,
lang_dataset_name,
lang_dataset_url,
lang_dataset_config,
lang_dataset_split,
lang_audio_column,
lang_text_column,
lang_license,
],
                [
                    lang_submission_result,
                    dataset_location,
                    lang_dataset_name,
                    lang_dataset_url,
                    lang_dataset_config,
                    lang_dataset_split,
                    lang_audio_column,
                    lang_text_column,
                    lang_license,
                ],
)
with gr.Row():
with gr.Accordion("📙 Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
)
# Footer with links visible across all pages and tabs
gr.HTML(footer_html)
# Only start scheduler if HuggingFace Hub is enabled
if HF_ENABLED:
scheduler = BackgroundScheduler()
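    # 1800 s = 30 minutes between scheduled restarts (presumably to pick up fresh evaluation data).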
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
print("Background scheduler started for HuggingFace Space restart")
else:
print("HuggingFace Hub disabled, background scheduler not started")
# Resolve the server port: PORT, then WEBSITES_PORT (Azure App Service), then GRADIO_SERVER_PORT (Hugging Face Spaces), defaulting to 7860.
port = int(os.environ.get("PORT", os.environ.get("WEBSITES_PORT", os.environ.get("GRADIO_SERVER_PORT", 7860))))
print(f"Starting Gradio server on port {port}...")
demo.queue(
default_concurrency_limit=40
).launch(
server_name="0.0.0.0",
server_port=port,
share=False,
ssr_mode=False,
pwa=True,
)