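"""Gradio app for an LLM perplexity leaderboard on Hugging Face Spaces.

On startup the app syncs (or creates) the results dataset repository,
builds the leaderboard table from the downloaded evaluation results, and
exposes a "Test Model" tab that runs a perplexity evaluation on an
arbitrary Hub model and refreshes the leaderboard with the new score.
"""
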
import os
import sys
import traceback

import gradio as gr
import pandas as pd
from gradio_leaderboard import Leaderboard
from huggingface_hub import snapshot_download, create_repo
from huggingface_hub.utils import RepositoryNotFoundError

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    AutoEvalColumn,
    fields,
)
from src.envs import API, EVAL_RESULTS_PATH, RESULTS_REPO, TOKEN, OWNER
from src.populate import get_leaderboard_df
from src.evaluation.dynamic_eval import run_dynamic_perplexity_eval

def init_leaderboard(dataframe):
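    """Wrap a results DataFrame in a gradio_leaderboard Leaderboard component.

    Column selection, search, hiding, and filtering are all driven by the
    AutoEvalColumn field metadata.
    """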
    if dataframe is None:
        raise ValueError("Leaderboard DataFrame is None.")
    
    print("\n=== Initializing Leaderboard ===", flush=True)
    print(f"DataFrame shape: {dataframe.shape}", flush=True)
    print(f"DataFrame columns: {dataframe.columns.tolist()}", flush=True)
    
    return Leaderboard(
        value=dataframe,
        select_columns=[c.name for c in fields(AutoEvalColumn) if not c.hidden],
        search_columns=[AutoEvalColumn.model.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[
            AutoEvalColumn.model_type.name,
            AutoEvalColumn.precision.name,
        ],
    )

def refresh_leaderboard():
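    """Rebuild the Leaderboard component from the latest results on disk.

    Falls back to a single placeholder row when no results exist yet,
    since an empty DataFrame would otherwise break the component.
    """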
    try:
        sys.stderr.write("Refreshing leaderboard data...\n")
        sys.stderr.flush()
        
        # Get fresh leaderboard data
        df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
        sys.stderr.write(f"Got DataFrame with shape: {df.shape}\n")
        sys.stderr.write(f"DataFrame columns: {df.columns.tolist()}\n")
        sys.stderr.flush()
        
        # Check if DataFrame is valid for leaderboard
        if df is None:
            sys.stderr.write("DataFrame is None, cannot create leaderboard\n")
            sys.stderr.flush()
            raise ValueError("DataFrame is None")
            
        if df.empty:
            sys.stderr.write("DataFrame is empty, creating minimal valid DataFrame\n")
            sys.stderr.flush()
            # Build a minimal single-row DataFrame so the leaderboard
            # component has something valid to render.
            dummy_row = {col: 0 if col in BENCHMARK_COLS or col == AutoEvalColumn.average.name else "" for col in COLS}
            dummy_row[AutoEvalColumn.model.name] = "No models evaluated yet"
            dummy_row[AutoEvalColumn.model_type_symbol.name] = "?"
            empty_df = pd.DataFrame([dummy_row])
            return init_leaderboard(empty_df)
        
        sys.stderr.write("Creating leaderboard with valid DataFrame\n")
        sys.stderr.flush()
        return init_leaderboard(df)
        
    except Exception as e:
        error_msg = str(e)
        traceback_str = traceback.format_exc()
        sys.stderr.write(f"Error in refresh_leaderboard: {error_msg}\n")
        sys.stderr.write(f"Traceback: {traceback_str}\n")
        sys.stderr.flush()
        raise

def run_perplexity_test(model_name, revision, precision):
    """Run perplexity evaluation on demand."""
    if not model_name:
        return "Please enter a model name.", None
    
    try:
        # Use stderr for more reliable logging in HF Spaces
        sys.stderr.write(f"\n=== Running Perplexity Test ===\n")
        sys.stderr.write(f"Model: {model_name}\n")
        sys.stderr.write(f"Revision: {revision}\n")
        sys.stderr.write(f"Precision: {precision}\n")
        sys.stderr.flush()
        
        success, result = run_dynamic_perplexity_eval(model_name, revision, precision)
        sys.stderr.write(f"Evaluation result - Success: {success}, Result: {result}\n")
        sys.stderr.flush()
        
        if success:
            try:
                # Try to refresh leaderboard
                sys.stderr.write("Attempting to refresh leaderboard...\n")
                sys.stderr.flush()
                
                new_leaderboard = refresh_leaderboard()
                sys.stderr.write("Leaderboard refresh successful\n")
                sys.stderr.flush()
                
                return f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\nResults saved to leaderboard.", new_leaderboard
            except Exception as refresh_error:
                # If leaderboard refresh fails, still show success but don't update leaderboard
                error_msg = str(refresh_error)
                traceback_str = traceback.format_exc()
                sys.stderr.write(f"Leaderboard refresh failed: {error_msg}\n")
                sys.stderr.write(f"Traceback: {traceback_str}\n")
                sys.stderr.flush()
                
                return f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n⚠️ Results saved but leaderboard refresh failed: {error_msg}\n\nPlease refresh the page to see updated results.", None
        else:
            return f"❌ Evaluation failed: {result}", None
            
    except Exception as e:
        error_msg = str(e)
        traceback_str = traceback.format_exc()
        sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
        sys.stderr.write(f"Traceback: {traceback_str}\n")
        sys.stderr.flush()
        return f"❌ Critical error: {error_msg}", None

# Initialize results repository and directory
try:
    # Try to download existing repository
    try:
        snapshot_download(
            repo_id=RESULTS_REPO,
            local_dir=EVAL_RESULTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN
        )
    except RepositoryNotFoundError:
        # Create the repository if it doesn't exist
        print(f"Creating new results repository: {RESULTS_REPO}")
        create_repo(
            repo_id=RESULTS_REPO,
            repo_type="dataset",
            private=False,
            token=TOKEN
        )
        # Create local directory
        os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
except Exception as e:
    print(f"Error initializing results: {e}")
    # Ensure local directory exists even if repo operations fail
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)

# Get initial leaderboard data
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)

# Create the Gradio interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 Leaderboard", elem_id="leaderboard-tab", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="about-tab", id=1):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("🧪 Test Model", elem_id="test-model-tab", id=2):
            gr.Markdown("## Run Perplexity Test\n\nTest any Hugging Face model for perplexity evaluation.")
            
            with gr.Row():
                with gr.Column():
                    model_name = gr.Textbox(label="Model name", placeholder="openai-community/gpt2")
                    revision = gr.Textbox(label="Revision", placeholder="main", value="main")
                    precision = gr.Dropdown(
                        choices=["float16", "bfloat16"],
                        label="Precision",
                        value="float16"
                    )
                    # NOTE: this checkbox is currently informational only; it is
                    # not passed to run_perplexity_test in the click handler below.
                    debug_mode = gr.Checkbox(label="Enable debug mode (more verbose logging)", value=True)
                
                with gr.Column():
                    test_button = gr.Button("🚀 Run Perplexity Test", variant="primary")
                    result = gr.Markdown()
            
            gr.Markdown("""
            ### Tips:
            - Check stderr logs in HF Spaces for detailed debugging information
            - If evaluation succeeds but leaderboard doesn't update, try refreshing the page
            - Example models to test: `openai-community/gpt2`, `EleutherAI/gpt-neo-1.3B`
            """)
            
            test_button.click(
                run_perplexity_test,
                [model_name, revision, precision],
                [result, leaderboard]
            )

demo.queue(default_concurrency_limit=5).launch()
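
# Usage note (assumptions: the src/ package is importable and the HF token
# consumed by src.envs is configured in the environment):
#   python app.py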