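"""Gradio app for a Hugging Face Spaces perplexity leaderboard.

Displays evaluation results from a Hub dataset repository and lets users
run an on-demand perplexity test against any Hub model. The helpers below
are deliberately defensive (verbose stderr logging, fallback DataFrames)
so that bad or missing results data degrades gracefully instead of
crashing the Space.
"""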

import os
import sys
import traceback

import gradio as gr
import pandas as pd
from gradio_leaderboard import Leaderboard
from huggingface_hub import create_repo, snapshot_download
from huggingface_hub.utils import RepositoryNotFoundError

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    AutoEvalColumn,
    fields,
)
from src.envs import EVAL_RESULTS_PATH, RESULTS_REPO, TOKEN
from src.populate import get_leaderboard_df
from src.evaluation.dynamic_eval import run_dynamic_perplexity_eval


def init_leaderboard(dataframe):
    """Build the Leaderboard component from a results DataFrame."""
    if dataframe is None:
        raise ValueError("Leaderboard DataFrame is None.")
    print("\n=== Initializing Leaderboard ===", flush=True)
    print(f"DataFrame shape: {dataframe.shape}", flush=True)
    print(f"DataFrame columns: {dataframe.columns.tolist()}", flush=True)
    return Leaderboard(
        value=dataframe,
        select_columns=[c.name for c in fields(AutoEvalColumn) if not c.hidden],
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[
            AutoEvalColumn.model_type.name,
            AutoEvalColumn.precision.name,
        ],
    )


def refresh_leaderboard():
    """Rebuild the leaderboard component from the latest results on disk."""
    try:
        sys.stderr.write("=== REFRESH LEADERBOARD DEBUG ===\n")
        sys.stderr.write("Refreshing leaderboard data...\n")
        sys.stderr.flush()

        # Get fresh leaderboard data.
        df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
        sys.stderr.write(f"get_leaderboard_df returned: {type(df)}\n")
        if df is not None:
            sys.stderr.write(f"DataFrame shape: {df.shape}\n")
            sys.stderr.write(f"DataFrame columns: {df.columns.tolist()}\n")
            sys.stderr.write(f"DataFrame empty: {df.empty}\n")
        else:
            sys.stderr.write("DataFrame is None!\n")
        sys.stderr.flush()

        # Fall back to a placeholder DataFrame if the data is missing,
        # empty, or lacks the columns the leaderboard component expects.
        if df is None:
            sys.stderr.write("DataFrame is None, creating fallback DataFrame\n")
            sys.stderr.flush()
            df = create_fallback_dataframe()
        elif df.empty:
            sys.stderr.write("DataFrame is empty, creating fallback DataFrame\n")
            sys.stderr.flush()
            df = create_fallback_dataframe()
        elif not all(col in df.columns for col in COLS):
            sys.stderr.write(
                f"DataFrame missing required columns. Has: {df.columns.tolist()}, Needs: {COLS}\n"
            )
            sys.stderr.flush()
            df = create_fallback_dataframe()

        sys.stderr.write(
            f"Final DataFrame for leaderboard - Shape: {df.shape}, Columns: {df.columns.tolist()}\n"
        )
        sys.stderr.flush()

        # Ensure the DataFrame has exactly the expected columns.
        for col in COLS:
            if col not in df.columns:
                sys.stderr.write(f"Adding missing column: {col}\n")
                if col in BENCHMARK_COLS or col == AutoEvalColumn.average.name:
                    df[col] = 0.0
                elif col == AutoEvalColumn.model.name:
                    df[col] = "Unknown Model"
                elif col == AutoEvalColumn.model_type_symbol.name:
                    df[col] = "?"
                else:
                    df[col] = ""
        sys.stderr.flush()

        # Reorder columns to match the expected order.
        df = df[COLS]

        sys.stderr.write("Creating leaderboard component...\n")
        sys.stderr.flush()
        new_leaderboard = init_leaderboard(df)
        sys.stderr.write("Leaderboard component created successfully\n")
        sys.stderr.flush()
        return new_leaderboard
    except Exception as e:
        error_msg = str(e)
        sys.stderr.write(f"CRITICAL ERROR in refresh_leaderboard: {error_msg}\n")
        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
        sys.stderr.flush()
        # Last resort: build the leaderboard from the placeholder DataFrame.
        try:
            sys.stderr.write("Creating emergency fallback leaderboard...\n")
            sys.stderr.flush()
            fallback_df = create_fallback_dataframe()
            return init_leaderboard(fallback_df)
        except Exception as fallback_error:
            sys.stderr.write(f"Even fallback failed: {fallback_error}\n")
            sys.stderr.flush()
            raise RuntimeError(f"Complete leaderboard failure: {error_msg}") from fallback_error


def create_fallback_dataframe():
    """Create a minimal valid DataFrame that won't crash the leaderboard."""
    sys.stderr.write("Creating fallback DataFrame...\n")
    sys.stderr.flush()
    # Build a single placeholder row so the leaderboard component always
    # has at least one valid entry to render.
    dummy_row = {}
    for col in COLS:
        if col in BENCHMARK_COLS or col == AutoEvalColumn.average.name:
            dummy_row[col] = 0.0
        elif col == AutoEvalColumn.model.name:
            dummy_row[col] = "No models evaluated yet"
        elif col == AutoEvalColumn.model_type_symbol.name:
            dummy_row[col] = "?"
        elif col == AutoEvalColumn.precision.name:
            dummy_row[col] = "float16"
        elif col == AutoEvalColumn.model_type.name:
            dummy_row[col] = "pretrained"
        elif col == AutoEvalColumn.weight_type.name:
            dummy_row[col] = "Original"
        elif col == AutoEvalColumn.architecture.name:
            dummy_row[col] = "Unknown"
        elif col == AutoEvalColumn.still_on_hub.name:
            dummy_row[col] = True
        elif col == AutoEvalColumn.license.name:
            dummy_row[col] = "Unknown"
        elif col in (AutoEvalColumn.params.name, AutoEvalColumn.likes.name):
            dummy_row[col] = 0.0
        else:
            # Covers revision and any other free-text columns.
            dummy_row[col] = ""
    df = pd.DataFrame([dummy_row])
    sys.stderr.write(f"Fallback DataFrame created with shape: {df.shape}\n")
    sys.stderr.write(f"Fallback DataFrame columns: {df.columns.tolist()}\n")
    sys.stderr.flush()
    return df


def run_perplexity_test(model_name, revision, precision):
    """Run perplexity evaluation on demand."""
    if not model_name:
        return "Please enter a model name.", None
    try:
        # Use stderr for more reliable logging in HF Spaces.
        sys.stderr.write("\n=== RUNNING PERPLEXITY TEST ===\n")
        sys.stderr.write(f"Model: {model_name}\n")
        sys.stderr.write(f"Revision: {revision}\n")
        sys.stderr.write(f"Precision: {precision}\n")
        sys.stderr.flush()

        success, result = run_dynamic_perplexity_eval(model_name, revision, precision)
        sys.stderr.write(f"Evaluation result - Success: {success}, Result: {result}\n")
        sys.stderr.flush()

        if not success:
            return f"❌ Evaluation failed: {result}", None

        try:
            # Try to refresh the leaderboard with the new result.
            sys.stderr.write("Attempting to refresh leaderboard...\n")
            sys.stderr.flush()
            new_leaderboard = refresh_leaderboard()
            if new_leaderboard is not None:
                sys.stderr.write("Leaderboard refresh successful\n")
                sys.stderr.flush()
                return (
                    f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n"
                    "Results saved and leaderboard updated.",
                    new_leaderboard,
                )
            sys.stderr.write("Leaderboard refresh returned None\n")
            sys.stderr.flush()
            return (
                f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n"
                "⚠️ Results saved but leaderboard update returned None.\n\n"
                "Please refresh the page to see updated results.",
                None,
            )
        except Exception as refresh_error:
            # If the refresh fails, still report success but leave the
            # leaderboard component untouched.
            error_msg = str(refresh_error)
            sys.stderr.write(f"Leaderboard refresh failed: {error_msg}\n")
            sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
            sys.stderr.flush()
            # Special-case the known "must have a value set" component error.
            if "must have a value set" in error_msg.lower():
                return (
                    f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n"
                    "⚠️ Results saved but leaderboard component failed to update "
                    "due to a data structure issue.\n\n"
                    "**Please refresh the page** to see your results in the main leaderboard.",
                    None,
                )
            return (
                f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n"
                f"⚠️ Results saved but leaderboard refresh failed: {error_msg}\n\n"
                "Please refresh the page to see updated results.",
                None,
            )
    except Exception as e:
        error_msg = str(e)
        sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
        sys.stderr.flush()
        return f"❌ Critical error: {error_msg}", None


# Initialize the results repository and local directory.
try:
    # Try to download the existing results repository.
    try:
        snapshot_download(
            repo_id=RESULTS_REPO,
            local_dir=EVAL_RESULTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN,
        )
    except RepositoryNotFoundError:
        # Create the repository if it doesn't exist yet.
        print(f"Creating new results repository: {RESULTS_REPO}")
        create_repo(
            repo_id=RESULTS_REPO,
            repo_type="dataset",
            private=False,
            token=TOKEN,
        )
    # Make sure the local results directory exists.
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
except Exception as e:
    print(f"Error initializing results: {e}")
    # Ensure the local directory exists even if the repo operations fail.
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)

# Get the initial leaderboard data.
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
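
# On a fresh Space the results repo can be empty; get_leaderboard_df may
# then return None or an empty frame, and init_leaderboard would raise at
# startup. Reuse the placeholder frame in that case (defensive guard,
# following the same fallback pattern refresh_leaderboard already uses).
if LEADERBOARD_DF is None or LEADERBOARD_DF.empty:
    LEADERBOARD_DF = create_fallback_dataframe()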

# Create the Gradio interface.
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 Leaderboard", elem_id="leaderboard-tab", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="about-tab", id=1):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("🧪 Test Model", elem_id="test-model-tab", id=2):
            gr.Markdown("## Run Perplexity Test\n\nTest any Hugging Face model for perplexity evaluation.")
            with gr.Row():
                with gr.Column():
                    model_name = gr.Textbox(label="Model name", placeholder="openai-community/gpt2")
                    revision = gr.Textbox(label="Revision", placeholder="main", value="main")
                    precision = gr.Dropdown(
                        choices=["float16", "bfloat16"],
                        label="Precision",
                        value="float16",
                    )
                    # Note: this checkbox is currently informational only;
                    # it is not passed to the click handler below.
                    debug_mode = gr.Checkbox(label="Enable debug mode (more verbose logging)", value=True)
                with gr.Column():
                    test_button = gr.Button("🚀 Run Perplexity Test", variant="primary")
                    result = gr.Markdown()
                    gr.Markdown("""
                    ### Tips:
                    - Check stderr logs in HF Spaces for detailed debugging information
                    - If evaluation succeeds but leaderboard doesn't update, try refreshing the page
                    - Example models to test: `openai-community/gpt2`, `EleutherAI/gpt-neo-1.3B`
                    """)

            # Returning a new Leaderboard instance from the handler replaces
            # the component; returning None leaves it unchanged.
            test_button.click(
                run_perplexity_test,
                [model_name, revision, precision],
                [result, leaderboard],
            )

demo.queue(default_concurrency_limit=5).launch()