omarkamali committed on
Commit
d8f80d6
·
verified ·
1 Parent(s): 821e671

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. README.md +2 -2
  2. space/app.py +597 -395
README.md CHANGED
@@ -63,7 +63,7 @@ summary = load_dataset("omneity-labs/lid-benchmark", "results_summary", split="t
63
  |-------|:-:|:-:|:-:|:-:|
64
  | **GlotLID** | **0.9253** | 0.5648 | 0.7772 | 0.4977 |
65
  | OpenLID v2 | 0.8748 | 0.6262 | 0.7762 | 0.5735 |
66
- | OpenLID v3 | 0.8556 | — | 0.6619 | — |
67
  | **Gherbal v4** | 0.8500 | **0.6298** | **0.8699** | **0.6909** |
68
  | OpenLID v1 | 0.8425 | 0.5587 | 0.8296 | 0.4845 |
69
  | NLLB-LID | 0.8331 | 0.1052 | 0.7522 | 0.3348 |
@@ -197,7 +197,7 @@ print(comparison.round(4))
197
  | [NLLB-LID](https://huggingface.co/facebook/fasttext-language-identification) | FastText | 218 | Meta |
198
  | [OpenLID v1](https://huggingface.co/laurievb/OpenLID) | FastText | 201 | Laurie Burchell |
199
  | [OpenLID v2](https://huggingface.co/laurievb/OpenLID) | FastText | 201 | Laurie Burchell |
200
- | [OpenLID v3](https://huggingface.co/laurievb/OpenLID) | FastText | 201 | Laurie Burchell |
201
  | [FastLID-176](https://fasttext.cc/docs/en/language-identification.html) | FastText | 176 | Meta |
202
 
203
  ## Benchmarks
 
63
  |-------|:-:|:-:|:-:|:-:|
64
  | **GlotLID** | **0.9253** | 0.5648 | 0.7772 | 0.4977 |
65
  | OpenLID v2 | 0.8748 | 0.6262 | 0.7762 | 0.5735 |
66
+ | OpenLID v3 (HPLT-LID) | 0.8556 | — | 0.6619 | — |
67
  | **Gherbal v4** | 0.8500 | **0.6298** | **0.8699** | **0.6909** |
68
  | OpenLID v1 | 0.8425 | 0.5587 | 0.8296 | 0.4845 |
69
  | NLLB-LID | 0.8331 | 0.1052 | 0.7522 | 0.3348 |
 
197
  | [NLLB-LID](https://huggingface.co/facebook/fasttext-language-identification) | FastText | 218 | Meta |
198
  | [OpenLID v1](https://huggingface.co/laurievb/OpenLID) | FastText | 201 | Laurie Burchell |
199
  | [OpenLID v2](https://huggingface.co/laurievb/OpenLID) | FastText | 201 | Laurie Burchell |
200
+ | [OpenLID v3 (HPLT-LID)](https://huggingface.co/HPLT/hplt_lid_fl) | FastText | 201 | HPLT |
201
  | [FastLID-176](https://fasttext.cc/docs/en/language-identification.html) | FastText | 176 | Meta |
202
 
203
  ## Benchmarks
space/app.py CHANGED
@@ -2,476 +2,678 @@
2
  LID Benchmark — Language Identification Leaderboard
3
  Built by Omneity Labs · https://www.omneitylabs.com
4
  """
 
 
5
 
 
 
6
  import gradio as gr
7
  import pandas as pd
8
  import plotly.graph_objects as go
9
  from datasets import load_dataset
 
10
 
11
- # ── Brand colors (Omneity gold palette) ──────────────────────────────────────
12
- GOLD = "#C4962C"
13
- GOLD_LIGHT = "#E8C96A"
14
- GOLD_BG = "#FDF8ED"
15
- DARK = "#2A2520"
16
- MUTED = "#8A8078"
17
- TEAL = "#4A8C7A"
18
- NAVY = "#3A5A8C"
19
- WARM_ORANGE = "#C47A2C"
20
- TERRA = "#A05A3C"
21
-
22
- CHART_COLORS = [GOLD, TEAL, NAVY, WARM_ORANGE, TERRA,
23
- "#7A6AAC", "#5A9A5A", "#C44A4A", "#4AC4C4", "#8A8A4A"]
24
-
25
- # ── Load data ─────────────────────────────────────────────────────────────────
26
- DATASET_ID = "omneity-labs/lid-benchmark"
27
 
 
 
 
28
  try:
29
- df_summary = load_dataset(DATASET_ID, "results_summary", split="train").to_pandas()
30
- df_agg = load_dataset(DATASET_ID, "results_aggregate", split="train").to_pandas()
31
- df_lang = load_dataset(DATASET_ID, "results_per_language", split="train").to_pandas()
32
- except Exception:
33
- # Fallback: load from local parquet (for dev)
34
- df_summary = pd.read_parquet("../data/results_summary/train.parquet")
35
  df_agg = pd.read_parquet("../data/results_aggregate/train.parquet")
36
  df_lang = pd.read_parquet("../data/results_per_language/train.parquet")
37
-
38
- ALL_MODELS = sorted(df_summary["model"].unique())
39
- ALL_BENCHMARKS = sorted(df_summary["benchmark"].unique())
40
- ALL_SCOPES = sorted(df_agg["scope"].unique())
41
- ALL_LANGUAGES = sorted(df_lang["language"].unique())
42
-
43
- BENCHMARK_DISPLAY = {
44
- "flores-devtest": "FLORES+ devtest",
45
- "flores-dev": "FLORES+ dev",
46
- "madar": "MADAR",
47
- "gherbal-multi": "Gherbal-Multi",
48
- "atlasia-lid": "ATLASIA-LID",
49
- "wili-2018": "WiLI-2018",
50
- "commonlid": "CommonLID",
51
- "bouquet": "Bouquet",
52
  }
53
 
54
- MODEL_URLS = {
55
- "gherbal-v4": "https://www.omneitylabs.com/models/gherbal",
56
- "gherbal-v3": "https://www.omneitylabs.com/models/gherbal",
57
- "gherbal-v2": "https://www.omneitylabs.com/models/gherbal",
58
- "gherbal-v1": "https://www.omneitylabs.com/models/gherbal",
59
- "glotlid": "https://huggingface.co/cis-lmu/glotlid",
60
- "nllb-lid": "https://huggingface.co/facebook/fasttext-language-identification",
61
- "openlid-v1": "https://huggingface.co/laurievb/OpenLID",
62
- "openlid-v2": "https://huggingface.co/laurievb/OpenLID",
63
- "openlid-v3": "https://huggingface.co/laurievb/OpenLID",
64
- "fastlid-176": "https://fasttext.cc/docs/en/language-identification.html",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  }
66
 
67
 
68
- # ── Tab 1: Leaderboard ───────────────────────────────────────────────────────
69
-
70
- def build_leaderboard(benchmark, scope, metric):
71
- if scope == "full":
72
- src = df_summary[df_summary["benchmark"] == benchmark].copy()
73
- else:
74
- src = df_agg[(df_agg["benchmark"] == benchmark) & (df_agg["scope"] == scope)].copy()
75
-
76
- if src.empty:
77
- return pd.DataFrame({"Info": ["No data for this combination."]})
78
-
79
- src = src.sort_values(metric, ascending=False).reset_index(drop=True)
80
- src.index = src.index + 1
81
- src.index.name = "Rank"
82
-
83
- cols = ["model", metric, "f1_weighted", "n_samples", "n_classes"]
84
- cols = [c for c in cols if c in src.columns]
85
- display = src[cols].copy()
86
- display.columns = ["Model", metric.replace("_", " ").title(),
87
- "F1 Weighted", "Samples", "Classes"][:len(cols)]
88
-
89
- for col in display.columns[1:]:
90
- if display[col].dtype == float:
91
- display[col] = display[col].apply(lambda x: f"{x:.4f}" if pd.notna(x) else "")
92
-
93
- return display
94
-
95
-
96
- def leaderboard_chart(benchmark, scope, metric):
 
 
 
 
 
 
 
 
 
 
 
97
  if scope == "full":
98
- src = df_summary[df_summary["benchmark"] == benchmark].copy()
99
- else:
100
- src = df_agg[(df_agg["benchmark"] == benchmark) & (df_agg["scope"] == scope)].copy()
101
-
102
- src = src.sort_values(metric, ascending=True)
103
-
104
- fig = go.Figure()
105
- fig.add_trace(go.Bar(
106
- y=src["model"], x=src[metric],
107
- orientation="h",
108
- marker_color=[GOLD if "gherbal" in m else MUTED for m in src["model"]],
109
- text=src[metric].apply(lambda x: f"{x:.4f}"),
110
- textposition="outside",
111
- ))
112
- fig.update_layout(
113
- title=f"{BENCHMARK_DISPLAY.get(benchmark, benchmark)} — {metric.replace('_', ' ').title()}",
114
- xaxis_title=metric.replace("_", " ").title(),
115
- yaxis_title="",
116
- plot_bgcolor="white",
117
- paper_bgcolor="white",
118
- font=dict(family="Inter, system-ui, sans-serif", size=13, color=DARK),
119
- margin=dict(l=120, r=40, t=50, b=40),
120
- height=max(350, len(src) * 40 + 80),
121
- xaxis=dict(range=[0, min(1.05, src[metric].max() * 1.15)],
122
- gridcolor="#f0ece4"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  )
124
  return fig
125
 
126
 
127
- # ── Tab 2: Per-Language Explorer ──────────────────────────────────────────────
128
-
129
- def per_language_table(models, benchmark, scope, min_samples):
130
- mask = (
131
- (df_lang["benchmark"] == benchmark) &
132
- (df_lang["scope"] == scope) &
133
- (df_lang["model"].isin(models)) &
134
- (df_lang["n_samples"] >= min_samples)
135
- )
136
- subset = df_lang[mask].copy()
137
-
138
- if subset.empty:
139
- return pd.DataFrame({"Info": ["No data for this combination."]})
140
-
141
- pivot = subset.pivot_table(
142
- index="language", columns="model", values="accuracy"
143
- ).reset_index()
144
-
145
- # Add sample count
146
- sample_counts = subset.groupby("language")["n_samples"].first()
147
- pivot = pivot.merge(sample_counts.reset_index(), on="language", how="left")
148
-
149
- # Sort by first model's accuracy descending
150
- sort_col = models[0] if models[0] in pivot.columns else pivot.columns[1]
151
- pivot = pivot.sort_values(sort_col, ascending=False, na_position="last")
152
-
153
- # Rename
154
- pivot = pivot.rename(columns={"language": "Language", "n_samples": "Samples"})
155
 
156
- # Format floats
157
- for col in pivot.columns:
158
- if col not in ("Language", "Samples") and pivot[col].dtype == float:
159
- pivot[col] = pivot[col].apply(lambda x: f"{x:.4f}" if pd.notna(x) else "—")
160
-
161
- return pivot
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
 
 
 
 
 
 
 
 
163
 
164
- def per_language_chart(models, benchmark, scope, min_samples, top_n):
165
- mask = (
166
- (df_lang["benchmark"] == benchmark) &
167
- (df_lang["scope"] == scope) &
168
- (df_lang["model"].isin(models)) &
169
- (df_lang["n_samples"] >= min_samples)
170
- )
171
- subset = df_lang[mask].copy()
172
 
173
- if subset.empty:
174
- return go.Figure()
 
175
 
176
- # Get top_n languages by average accuracy across selected models
177
- avg_acc = subset.groupby("language")["accuracy"].mean().sort_values(ascending=False)
178
- top_langs = avg_acc.head(top_n).index.tolist()
179
- subset = subset[subset["language"].isin(top_langs)]
 
 
 
 
 
 
 
 
 
 
 
 
180
 
181
- fig = go.Figure()
182
- for i, model in enumerate(models):
183
- model_data = subset[subset["model"] == model].set_index("language")
184
- model_data = model_data.reindex(top_langs)
185
  fig.add_trace(go.Bar(
186
- name=model,
187
- x=model_data.index,
188
- y=model_data["accuracy"],
189
- marker_color=CHART_COLORS[i % len(CHART_COLORS)],
190
  ))
 
191
 
192
- fig.update_layout(
193
- barmode="group",
194
- title=f"Per-Language Accuracy — {BENCHMARK_DISPLAY.get(benchmark, benchmark)}",
195
- yaxis_title="Accuracy",
196
- xaxis_tickangle=-45,
197
- plot_bgcolor="white",
198
- paper_bgcolor="white",
199
- font=dict(family="Inter, system-ui, sans-serif", size=12, color=DARK),
200
- legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
201
- margin=dict(l=50, r=20, t=70, b=100),
202
- height=500,
203
- yaxis=dict(range=[0, 1.05], gridcolor="#f0ece4"),
204
- )
205
  return fig
206
 
207
 
208
- # ── Tab 3: Confusion Explorer ────────────────────────────────────────────────
209
 
210
- def confusion_table(model, benchmark, scope, min_samples):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  mask = (
212
- (df_lang["model"] == model) &
213
- (df_lang["benchmark"] == benchmark) &
214
- (df_lang["scope"] == scope) &
215
- (df_lang["n_samples"] >= min_samples) &
216
- (df_lang["top_confusion_1"].notna())
217
  )
218
- subset = df_lang[mask][
219
  ["language", "accuracy", "n_samples",
220
  "top_confusion_1", "top_confusion_1_count",
221
  "top_confusion_2", "top_confusion_2_count",
222
  "top_confusion_3", "top_confusion_3_count"]
223
  ].copy()
224
-
225
- subset = subset.sort_values("accuracy", ascending=True)
226
- subset["accuracy"] = subset["accuracy"].apply(lambda x: f"{x:.4f}")
227
-
228
- subset.columns = [
229
- "Language", "Accuracy", "Samples",
230
  "Top Confusion", "Count",
231
  "2nd Confusion", "Count ",
232
  "3rd Confusion", "Count ",
233
  ]
234
- return subset
235
 
236
 
237
- # ── Tab 4: Model Comparison (Radar) ──────────────────────────────────────────
 
 
 
238
 
239
- def model_radar(models, scope):
240
- benchmarks = ["flores-devtest", "madar", "gherbal-multi", "atlasia-lid"]
241
- bench_labels = [BENCHMARK_DISPLAY.get(b, b) for b in benchmarks]
 
 
 
 
 
 
 
 
 
242
 
243
- fig = go.Figure()
244
- for i, model in enumerate(models):
245
- values = []
246
- for b in benchmarks:
247
  if scope == "full":
248
- row = df_summary[(df_summary["model"] == model) & (df_summary["benchmark"] == b)]
249
  else:
250
- row = df_agg[(df_agg["model"] == model) & (df_agg["benchmark"] == b) & (df_agg["scope"] == scope)]
251
- val = row["accuracy"].values[0] if len(row) > 0 else 0
252
- values.append(val)
253
- values.append(values[0]) # close the polygon
254
-
255
  fig.add_trace(go.Scatterpolar(
256
- r=values,
257
- theta=bench_labels + [bench_labels[0]],
258
- fill="toself",
259
- name=model,
260
- line_color=CHART_COLORS[i % len(CHART_COLORS)],
261
- fillcolor=CHART_COLORS[i % len(CHART_COLORS)],
262
- opacity=0.3,
263
  ))
264
-
265
- fig.update_layout(
266
- polar=dict(
267
- radialaxis=dict(visible=True, range=[0, 1], gridcolor="#f0ece4"),
268
- bgcolor="white",
269
- ),
270
- plot_bgcolor="white",
271
- paper_bgcolor="white",
272
- font=dict(family="Inter, system-ui, sans-serif", size=13, color=DARK),
273
- title="Model Comparison",
274
- margin=dict(l=60, r=60, t=60, b=40),
275
- height=500,
276
- legend=dict(orientation="h", yanchor="bottom", y=-0.15, xanchor="center", x=0.5),
277
- )
278
  return fig
279
 
280
 
281
- # ── Custom CSS ───────────────────────────────────────────────────────────────
282
 
283
- CUSTOM_CSS = """
284
- .gradio-container {
285
- max-width: 1200px !important;
286
- font-family: 'Inter', system-ui, -apple-system, sans-serif !important;
287
- }
288
- .header-banner {
289
- background: linear-gradient(135deg, #2A2520 0%, #3A3530 100%);
290
- border-radius: 12px;
291
- padding: 28px 32px;
292
- margin-bottom: 16px;
293
- color: white;
294
- }
295
- .header-banner h1 {
296
- color: #E8C96A !important;
297
- font-size: 1.6em !important;
298
- margin-bottom: 4px !important;
299
- font-weight: 700 !important;
300
- }
301
- .header-banner p {
302
- color: #B8B0A8 !important;
303
- font-size: 0.95em !important;
304
- margin: 0 !important;
305
- }
306
- .header-banner a {
307
- color: #E8C96A !important;
308
- text-decoration: none;
309
- }
310
- .header-banner a:hover {
311
- text-decoration: underline;
312
- }
313
- footer { display: none !important; }
314
- """
315
-
316
-
317
- # ── Build the Gradio app ─────────────────────────────────────────────────────
318
 
319
  theme = gr.themes.Soft(
320
  primary_hue=gr.themes.Color(
321
- c50="#FDF8ED", c100="#F8EDCC", c200="#F0DCA0",
322
- c300="#E8C96A", c400="#D4AD3C", c500="#C4962C",
323
- c600="#A07A20", c700="#7C5E18", c800="#584210", c900="#3A2C0A",
324
- c950="#2A2008",
325
- ),
326
  neutral_hue=gr.themes.Color(
327
- c50="#FAF8F5", c100="#F0ECE4", c200="#E0D8CC",
328
- c300="#C8BEB0", c400="#A89E90", c500="#8A8078",
329
- c600="#6A6258", c700="#4A4440", c800="#2A2520",
330
- c900="#1A1815", c950="#0A0908",
331
- ),
332
  font=["Inter", "system-ui", "sans-serif"],
333
  )
334
 
335
- with gr.Blocks(theme=theme, css=CUSTOM_CSS, title="LID Benchmark") as demo:
 
 
336
 
337
- gr.HTML("""
338
- <div class="header-banner">
339
- <h1>🌍 LID Benchmark</h1>
340
- <p>
341
- 10 models · 8 benchmarks · 380 languages ·
342
- Built by <a href="https://www.omneitylabs.com" target="_blank">Omneity Labs</a> ·
343
- Model: <a href="https://www.omneitylabs.com/models/gherbal" target="_blank">Gherbal</a>
344
- </p>
345
- </div>
346
- """)
347
 
348
  with gr.Tabs():
349
 
350
- # ── TAB 1: Leaderboard ────────────────────────────────────────────
351
  with gr.Tab("Leaderboard"):
352
- gr.Markdown("Compare all models on a benchmark. Select scope to control which languages are included.")
353
  with gr.Row():
354
- lb_bench = gr.Dropdown(
355
- choices=ALL_BENCHMARKS, value="flores-devtest",
356
- label="Benchmark", scale=2)
357
- lb_scope = gr.Dropdown(
358
- choices=ALL_SCOPES, value="full",
359
- label="Scope", scale=1)
360
- lb_metric = gr.Dropdown(
361
- choices=["accuracy", "f1_macro", "f1_weighted",
362
- "precision_macro", "recall_macro"],
363
- value="accuracy", label="Metric", scale=1)
364
-
365
- lb_chart = gr.Plot()
366
- lb_table = gr.Dataframe(label="Results")
367
-
368
- def update_leaderboard(bench, scope, metric):
369
- return leaderboard_chart(bench, scope, metric), build_leaderboard(bench, scope, metric)
370
-
371
- for inp in [lb_bench, lb_scope, lb_metric]:
372
- inp.change(update_leaderboard, [lb_bench, lb_scope, lb_metric], [lb_chart, lb_table])
373
-
374
- demo.load(update_leaderboard, [lb_bench, lb_scope, lb_metric], [lb_chart, lb_table])
375
-
376
- # ── TAB 2: Per-Language ───────────────────────────────────────────
377
  with gr.Tab("Per-Language"):
378
- gr.Markdown("Explore accuracy for individual languages. Compare how different models handle specific languages.")
379
  with gr.Row():
380
- pl_models = gr.Dropdown(
381
- choices=ALL_MODELS,
382
- value=["gherbal-v4", "glotlid", "openlid-v2"],
383
- multiselect=True, label="Models", scale=3)
384
- pl_bench = gr.Dropdown(
385
- choices=ALL_BENCHMARKS, value="flores-devtest",
386
- label="Benchmark", scale=2)
387
  with gr.Row():
388
- pl_scope = gr.Dropdown(
389
- choices=ALL_SCOPES, value="full",
390
- label="Scope", scale=1)
391
- pl_min_samples = gr.Slider(
392
- minimum=1, maximum=1000, value=50, step=10,
393
- label="Min samples", scale=1)
394
- pl_top_n = gr.Slider(
395
- minimum=5, maximum=50, value=20, step=5,
396
- label="Top N (chart)", scale=1)
397
-
398
- pl_chart = gr.Plot()
399
- pl_table = gr.Dataframe(label="Per-Language Accuracy")
400
-
401
- def update_per_lang(models, bench, scope, min_s, top_n):
402
- if not models:
403
- return go.Figure(), pd.DataFrame({"Info": ["Select at least one model."]})
404
- return (per_language_chart(models, bench, scope, min_s, top_n),
405
- per_language_table(models, bench, scope, min_s))
406
-
407
- for inp in [pl_models, pl_bench, pl_scope, pl_min_samples, pl_top_n]:
408
- inp.change(update_per_lang,
409
- [pl_models, pl_bench, pl_scope, pl_min_samples, pl_top_n],
410
- [pl_chart, pl_table])
411
-
412
- demo.load(update_per_lang,
413
- [pl_models, pl_bench, pl_scope, pl_min_samples, pl_top_n],
414
- [pl_chart, pl_table])
415
-
416
- # ── TAB 3: Confusion Analysis ─────────────────────────────────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  with gr.Tab("Confusions"):
418
- gr.Markdown("See which languages are most frequently confused with each other. Sorted worst-first.")
419
  with gr.Row():
420
- cf_model = gr.Dropdown(
421
- choices=ALL_MODELS, value="gherbal-v4",
422
- label="Model", scale=2)
423
- cf_bench = gr.Dropdown(
424
- choices=ALL_BENCHMARKS, value="flores-devtest",
425
- label="Benchmark", scale=2)
426
- cf_scope = gr.Dropdown(
427
- choices=ALL_SCOPES, value="full",
428
- label="Scope", scale=1)
429
- cf_min = gr.Slider(
430
- minimum=1, maximum=500, value=50, step=10,
431
- label="Min samples", scale=1)
432
-
433
- cf_table = gr.Dataframe(label="Confusion Pairs (worst first)")
434
-
435
- for inp in [cf_model, cf_bench, cf_scope, cf_min]:
436
- inp.change(confusion_table, [cf_model, cf_bench, cf_scope, cf_min], cf_table)
437
-
438
- demo.load(confusion_table, [cf_model, cf_bench, cf_scope, cf_min], cf_table)
439
-
440
- # ── TAB 4: Model Comparison ───────────────────────────────────────
441
  with gr.Tab("Compare"):
442
- gr.Markdown("Radar chart comparing selected models across core benchmarks.")
443
  with gr.Row():
444
- mc_models = gr.Dropdown(
445
- choices=ALL_MODELS,
446
- value=["gherbal-v4", "glotlid", "nllb-lid"],
447
- multiselect=True, label="Models", scale=3)
448
- mc_scope = gr.Dropdown(
449
- choices=ALL_SCOPES, value="full",
450
- label="Scope", scale=1)
451
-
452
- mc_chart = gr.Plot()
453
-
454
- def update_radar(models, scope):
455
- if not models:
456
- return go.Figure()
457
- return model_radar(models, scope)
458
-
459
- for inp in [mc_models, mc_scope]:
460
- inp.change(update_radar, [mc_models, mc_scope], mc_chart)
461
-
462
- demo.load(update_radar, [mc_models, mc_scope], mc_chart)
463
-
464
- gr.HTML("""
465
- <div style="text-align: center; padding: 16px 0 8px; color: #8A8078; font-size: 0.85em;">
466
- Data: <a href="https://huggingface.co/datasets/omneity-labs/lid-benchmark"
467
- style="color: #C4962C;">omneity-labs/lid-benchmark</a> ·
468
- Model: <a href="https://www.omneitylabs.com/models/gherbal"
469
- style="color: #C4962C;">Gherbal</a> ·
470
- <a href="https://omarkamali.com" style="color: #C4962C;">Omar Kamali</a> ·
471
- <a href="https://www.omneitylabs.com" style="color: #C4962C;">Omneity Labs</a>
472
- </div>
473
- """)
474
-
475
 
476
  if __name__ == "__main__":
477
- demo.launch(ssr_mode=False)
 
 
2
  LID Benchmark — Language Identification Leaderboard
3
  Built by Omneity Labs · https://www.omneitylabs.com
4
  """
5
+ import os
6
+ os.environ["GRADIO_SSR_MODE"] = "false"
7
 
8
+ import sys
9
+ print("[startup] importing...", flush=True)
10
  import gradio as gr
11
  import pandas as pd
12
  import plotly.graph_objects as go
13
  from datasets import load_dataset
14
+ print("[startup] imports done", flush=True)
15
 
16
+ # ── Brand ─────────────────────────────────────────────────────────────────────
17
+ GOLD, TEAL, NAVY = "#C4962C", "#4A8C7A", "#3A5A8C"
18
+ WARM_ORANGE, TERRA, DARK, MUTED = "#C47A2C", "#A05A3C", "#2A2520", "#8A8078"
19
+ COLORS = [GOLD, TEAL, NAVY, WARM_ORANGE, TERRA,
20
+ "#7A6AAC", "#5A9A5A", "#C44A4A", "#4AC4C4", "#8A8A4A"]
 
 
 
 
 
 
 
 
 
 
 
21
 
22
+ # ── Data ──────────────────────────────────────────────────────────────────────
23
+ DS = "omneity-labs/lid-benchmark"
24
+ print("[startup] loading data...", flush=True)
25
  try:
26
+ df_sum = load_dataset(DS, "results_summary", split="train").to_pandas()
27
+ df_agg = load_dataset(DS, "results_aggregate", split="train").to_pandas()
28
+ df_lang = load_dataset(DS, "results_per_language", split="train").to_pandas()
29
+ except Exception as e:
30
+ print(f"[startup] HF failed ({e}), local fallback", flush=True)
31
+ df_sum = pd.read_parquet("../data/results_summary/train.parquet")
32
  df_agg = pd.read_parquet("../data/results_aggregate/train.parquet")
33
  df_lang = pd.read_parquet("../data/results_per_language/train.parquet")
34
+ print(f"[startup] {len(df_sum)}/{len(df_agg)}/{len(df_lang)} rows", flush=True)
35
+
36
+ MODELS = sorted(df_sum["model"].unique())
37
+ BENCHMARKS = sorted(df_sum["benchmark"].unique())
38
+ SCOPES = sorted(df_agg["scope"].unique())
39
+ BENCH_ALL = ["__average__"] + BENCHMARKS
40
+ METRICS = ["accuracy", "f1_macro", "f1_weighted", "precision_macro", "recall_macro"]
41
+ BNAME = {
42
+ "flores-devtest": "FLORES+ devtest", "flores-dev": "FLORES+ dev",
43
+ "madar": "MADAR", "gherbal-multi": "Gherbal-Multi",
44
+ "atlasia-lid": "ATLASIA-LID", "wili-2018": "WiLI-2018",
45
+ "commonlid": "CommonLID", "bouquet": "Bouquet",
46
+ "__average__": "Avg (all benchmarks)",
 
 
47
  }
48
 
49
+ # ── Language names ────────────────────────────────────────────────────────────
50
+ LN = {
51
+ "acm_Arab": "Mesopotamian Arabic", "acq_Arab": "Ta'izzi-Adeni Arabic",
52
+ "acw_Arab": "Hijazi Arabic", "acx_Arab": "Omani Arabic",
53
+ "aeb_Arab": "Tunisian Arabic", "afb_Arab": "Gulf Arabic",
54
+ "apc_Arab": "Levantine Arabic", "apd_Arab": "Sudanese Arabic",
55
+ "arb_Arab": "Modern Standard Arabic", "arb_Latn": "Arabic (Latn)",
56
+ "arq_Arab": "Algerian Arabic", "ars_Arab": "Najdi Arabic",
57
+ "ary_Arab": "Moroccan Arabic", "ary_Latn": "Moroccan Arabic (Latn)",
58
+ "arz_Arab": "Egyptian Arabic", "arz_Latn": "Egyptian Arabic (Latn)",
59
+ "ayl_Arab": "Libyan Arabic", "ayn_Arab": "Sanaani Arabic",
60
+ "ayp_Arab": "N. Mesopotamian Arabic",
61
+ "mey_Arab": "Hassaniya Arabic", "mey_Latn": "Hassaniya (Latn)",
62
+ "kab_Latn": "Kabyle", "taq_Latn": "Tamasheq (Latn)",
63
+ "taq_Tfng": "Tamasheq (Tifinagh)", "tzm_Tfng": "Central Atlas Tamazight",
64
+ "zgh_Tfng": "Std Moroccan Tamazight",
65
+ "eng_Latn": "English", "fra_Latn": "French", "deu_Latn": "German",
66
+ "spa_Latn": "Spanish", "por_Latn": "Portuguese", "ita_Latn": "Italian",
67
+ "nld_Latn": "Dutch", "pol_Latn": "Polish", "ron_Latn": "Romanian",
68
+ "cat_Latn": "Catalan", "eus_Latn": "Basque", "glg_Latn": "Galician",
69
+ "por_Latn_braz1246": "Brazilian Portuguese",
70
+ "rus_Cyrl": "Russian", "ukr_Cyrl": "Ukrainian", "bel_Cyrl": "Belarusian",
71
+ "srp_Cyrl": "Serbian", "bul_Cyrl": "Bulgarian", "mkd_Cyrl": "Macedonian",
72
+ "hrv_Latn": "Croatian", "slv_Latn": "Slovenian", "slk_Latn": "Slovak",
73
+ "ces_Latn": "Czech", "hun_Latn": "Hungarian", "lit_Latn": "Lithuanian",
74
+ "lvs_Latn": "Latvian", "ekk_Latn": "Estonian", "fin_Latn": "Finnish",
75
+ "swe_Latn": "Swedish", "dan_Latn": "Danish",
76
+ "nob_Latn": "Norwegian Bokm\u00e5l", "nno_Latn": "Norwegian Nynorsk",
77
+ "isl_Latn": "Icelandic",
78
+ "cmn_Hans": "Chinese (Simplified)", "cmn_Hant": "Chinese (Traditional)",
79
+ "zho_Hans": "Chinese (Simplified)", "jpn_Jpan": "Japanese",
80
+ "kor_Hang": "Korean (Hangul)", "kor_Kore": "Korean",
81
+ "hin_Deva": "Hindi", "urd_Arab": "Urdu", "ben_Beng": "Bengali",
82
+ "pan_Guru": "Punjabi", "guj_Gujr": "Gujarati", "mar_Deva": "Marathi",
83
+ "tam_Taml": "Tamil", "tel_Telu": "Telugu", "kan_Knda": "Kannada",
84
+ "mal_Mlym": "Malayalam", "sin_Sinh": "Sinhala", "npi_Deva": "Nepali",
85
+ "asm_Beng": "Assamese", "ory_Orya": "Odia",
86
+ "tha_Thai": "Thai", "vie_Latn": "Vietnamese", "ind_Latn": "Indonesian",
87
+ "zsm_Latn": "Malay", "fil_Latn": "Filipino", "khm_Khmr": "Khmer",
88
+ "mya_Mymr": "Burmese", "lao_Laoo": "Lao",
89
+ "tur_Latn": "Turkish", "azj_Latn": "Azerbaijani",
90
+ "kaz_Cyrl": "Kazakh", "uzn_Latn": "Uzbek", "kir_Cyrl": "Kyrgyz",
91
+ "tuk_Latn": "Turkmen", "tat_Cyrl": "Tatar",
92
+ "pes_Arab": "Persian", "prs_Arab": "Dari", "tgk_Cyrl": "Tajik",
93
+ "heb_Hebr": "Hebrew", "kat_Geor": "Georgian", "hye_Armn": "Armenian",
94
+ "ell_Grek": "Greek", "mlt_Latn": "Maltese",
95
+ "swh_Latn": "Swahili", "amh_Ethi": "Amharic", "tir_Ethi": "Tigrinya",
96
+ "som_Latn": "Somali", "gaz_Latn": "Oromo (West Central)",
97
+ "gax_Latn": "Oromo (Borana)", "hau_Latn": "Hausa",
98
+ "yor_Latn": "Yoruba", "ibo_Latn": "Igbo", "wol_Latn": "Wolof",
99
+ "fuc_Latn": "Pulaar", "fuv_Latn": "Nigerian Fulfulde",
100
+ "bam_Latn": "Bambara", "mos_Latn": "Mossi", "ewe_Latn": "Ewe",
101
+ "twi_Latn": "Twi", "fon_Latn": "Fon", "pcm_Latn": "Nigerian Pidgin",
102
+ "dyu_Latn": "Dyula", "kbp_Latn": "Kabiy\u00e8",
103
+ "zul_Latn": "Zulu", "xho_Latn": "Xhosa", "sot_Latn": "Southern Sotho",
104
+ "tsn_Latn": "Tswana", "ssw_Latn": "Swati", "nso_Latn": "Northern Sotho",
105
+ "tso_Latn": "Tsonga", "sna_Latn": "Shona", "nya_Latn": "Chichewa",
106
+ "ven_Latn": "Venda", "naq_Latn": "Nama", "xuu_Latn": "Khwedam",
107
+ "lin_Latn": "Lingala", "lua_Latn": "Luba-Kasai", "sag_Latn": "Sango",
108
+ "tum_Latn": "Tumbuka", "bem_Latn": "Bemba", "kmb_Latn": "Kimbundu",
109
+ "umb_Latn": "Umbundu", "kin_Latn": "Kinyarwanda",
110
+ "run_Latn": "Kirundi", "lug_Latn": "Luganda", "luo_Latn": "Dholuo",
111
+ "kam_Latn": "Kamba", "kik_Latn": "Kikuyu", "mas_Latn": "Maasai",
112
+ "guz_Latn": "Gusii", "jmc_Latn": "Machame", "enb_Latn": "Markweeta",
113
+ "dik_Latn": "SW Dinka", "nus_Latn": "Nuer",
114
+ "mlg": "Malagasy", "plt_Latn": "Plateau Malagasy",
115
+ "ckb_Arab": "Central Kurdish", "afr_Latn": "Afrikaans",
116
+ "hat_Latn": "Haitian Creole", "ceb_Latn": "Cebuano",
117
+ "jav_Latn": "Javanese", "sun_Latn": "Sundanese",
118
+ "tgl_Latn": "Tagalog", "war_Latn": "Waray",
119
+ "bre_Latn": "Breton", "bre": "Breton", "cym_Latn": "Welsh",
120
+ "gla_Latn": "Scottish Gaelic", "gle_Latn": "Irish",
121
+ "fry_Latn": "W Frisian", "fry": "W Frisian",
122
+ "oci_Latn": "Occitan", "ast_Latn": "Asturian",
123
+ "bod_Tibt": "Tibetan", "div_Thaa": "Dhivehi",
124
+ "fao_Latn": "Faroese", "ltz_Latn": "Luxembourgish",
125
+ "sme_Latn": "Northern Sami", "epo_Latn": "Esperanto",
126
+ "lat": "Latin", "san_Deva": "Sanskrit",
127
+ "bos_Latn": "Bosnian", "srd_Latn": "Sardinian",
128
+ "roh_Latn": "Romansh", "fur_Latn": "Friulian",
129
+ "snd_Arab": "Sindhi", "pnb_Arab": "W Punjabi",
130
+ "bho_Deva": "Bhojpuri", "mai_Deva": "Maithili",
131
+ "awa_Deva": "Awadhi", "mag_Deva": "Magahi",
132
+ "kmr_Latn": "N Kurdish", "diq_Latn": "Zazaki",
133
+ "nqo_Nkoo": "N'Ko", "ace_Latn": "Acehnese", "ban_Latn": "Balinese",
134
+ "bug_Latn": "Buginese", "min_Latn": "Minangkabau",
135
+ "ilo_Latn": "Ilocano", "pag_Latn": "Pangasinan",
136
+ "mri_Latn": "M\u0101ori", "smo_Latn": "Samoan", "fij_Latn": "Fijian",
137
+ "tpi_Latn": "Tok Pisin", "che_Cyrl": "Chechen", "bak_Cyrl": "Bashkir",
138
+ "chv_Cyrl": "Chuvash", "khk_Cyrl": "Mongolian", "uig_Arab": "Uyghur",
139
+ "ydd_Hebr": "Yiddish", "lij_Latn": "Ligurian", "lmo_Latn": "Lombard",
140
+ "vec_Latn": "Venetian", "scn_Latn": "Sicilian", "szl_Latn": "Silesian",
141
+ "als_Latn": "Albanian (Tosk)", "wuu_Hans": "Wu Chinese", "wuu": "Wu",
142
+ "yue_Hant": "Cantonese", "yue": "Cantonese",
143
+ "sat_Olck": "Santali", "quy_Latn": "Quechua",
144
  }
145
 
146
 
147
+ def ld(code):
148
+ """Return 'Name (code)' for display."""
149
+ n = LN.get(code)
150
+ return f"{n} ({code})" if n else code
151
+
152
+
153
+ # ── Regions ───────────────────────────────────────────────────────────────────
154
+ ARABIC_16 = [
155
+ "acm_Arab", "acq_Arab", "acw_Arab", "acx_Arab", "aeb_Arab", "afb_Arab",
156
+ "apc_Arab", "apd_Arab", "arb_Arab", "arq_Arab", "ars_Arab", "ary_Arab",
157
+ "arz_Arab", "ayl_Arab", "ayn_Arab", "ayp_Arab",
158
+ ]
159
+ NORTH_AFRICAN = [
160
+ "ary_Arab", "ary_Latn", "arq_Arab", "aeb_Arab", "ayl_Arab",
161
+ "arz_Arab", "arz_Latn", "mey_Arab", "mey_Latn",
162
+ "kab_Latn", "taq_Latn", "taq_Tfng", "tzm_Tfng", "zgh_Tfng", "arb_Arab",
163
+ ]
164
+ AFRICAN = list(set(
165
+ ["swh_Latn", "amh_Ethi", "tir_Ethi", "som_Latn", "gaz_Latn", "gax_Latn",
166
+ "kin_Latn", "run_Latn", "lug_Latn", "luo_Latn", "nyn", "kam_Latn",
167
+ "kik_Latn", "mas_Latn", "guz_Latn", "jmc_Latn", "enb_Latn",
168
+ "hau_Latn", "yor_Latn", "ibo_Latn", "wol_Latn", "fuc_Latn", "fuv_Latn",
169
+ "bam_Latn", "mos_Latn", "ewe_Latn", "twi_Latn", "fon_Latn", "pcm_Latn",
170
+ "dyu_Latn", "kbp_Latn",
171
+ "zul_Latn", "xho_Latn", "sot_Latn", "tsn_Latn", "ssw_Latn", "nso_Latn",
172
+ "tso_Latn", "sna_Latn", "nya_Latn", "ven_Latn", "naq_Latn", "xuu_Latn",
173
+ "afr_Latn", "lin_Latn", "lua_Latn", "sag_Latn", "tum_Latn", "bem_Latn",
174
+ "kmb_Latn", "umb_Latn", "loa_Latn", "kwy_Latn", "cjk_Latn",
175
+ "mlg", "plt_Latn", "dik_Latn", "nus_Latn"]
176
+ + NORTH_AFRICAN
177
+ ))
178
+
179
+
180
+ # ── Data helpers ──────────────────────────────────────────────────────────────
181
+
182
+ def get_agg(bench, scope, metric):
183
+ """Model-level aggregated metric values."""
184
+ if bench == "__average__":
185
+ src = df_sum if scope == "full" else df_agg[df_agg["scope"] == scope]
186
+ return src.groupby("model")[metric].mean().reset_index()
187
  if scope == "full":
188
+ return df_sum[df_sum["benchmark"] == bench].copy()
189
+ return df_agg[(df_agg["benchmark"] == bench) & (df_agg["scope"] == scope)].copy()
190
+
191
+
192
+ def get_lang(bench, scope, models, lang_filter=None):
193
+ """Per-language accuracy, optionally averaged across benchmarks."""
194
+ mask = (df_lang["scope"] == scope) & (df_lang["model"].isin(models))
195
+ if lang_filter is not None:
196
+ mask = mask & (df_lang["language"].isin(lang_filter))
197
+ if bench != "__average__":
198
+ mask = mask & (df_lang["benchmark"] == bench)
199
+
200
+ sub = df_lang[mask].copy()
201
+ if sub.empty:
202
+ return sub
203
+
204
+ if bench == "__average__":
205
+ return (sub.groupby(["model", "language"])
206
+ .agg(accuracy=("accuracy", "mean"), n_samples=("n_samples", "max"))
207
+ .reset_index())
208
+ return sub
209
+
210
+
211
+ # ── Chart builders (using Figure constructor to ensure axis ranges) ───────────
212
+
213
def make_hbar(labels, values, colors, title, height):
    """Horizontal bar chart whose x-axis is locked to the 0-1 range."""
    # Coerce everything to plain Python types before handing to plotly.
    y_labels = [str(item) for item in labels]
    x_vals = [float(item) if pd.notna(item) else 0.0 for item in values]
    bar_colors = list(colors)
    bar_texts = [f"{item:.4f}" for item in x_vals]

    bar = go.Bar(
        y=y_labels, x=x_vals, orientation="h",
        marker_color=bar_colors,
        text=bar_texts, textposition="inside",
        insidetextanchor="end",
        textfont=dict(color="white", size=12),
    )
    layout = go.Layout(
        title=dict(text=title, x=0),
        xaxis=dict(
            range=[0, 1.0], autorange=False, fixedrange=True,
            title="", gridcolor="#f0ece4", zeroline=False,
            dtick=0.1,
        ),
        yaxis=dict(automargin=True, fixedrange=True),
        plot_bgcolor="white", paper_bgcolor="white",
        font=dict(family="Inter, system-ui, sans-serif", size=13,
                  color=DARK),
        margin=dict(l=10, r=30, t=50, b=40),
        height=height, bargap=0.25,
    )
    return go.Figure(data=[bar], layout=layout)
245
 
246
 
247
def make_vbar(categories, traces, title, height=500):
    """Vertical grouped bar chart whose y-axis is locked to 0-1."""
    cat_labels = [str(cat) for cat in categories]

    layout = go.Layout(
        title=dict(text=title, x=0),
        barmode="group",
        xaxis=dict(
            tickangle=-45, automargin=True, fixedrange=True,
            type="category",
        ),
        yaxis=dict(
            range=[0, 1.05], autorange=False, fixedrange=True,
            title="Accuracy", gridcolor="#f0ece4", zeroline=False,
            dtick=0.2,
        ),
        plot_bgcolor="white", paper_bgcolor="white",
        font=dict(family="Inter, system-ui, sans-serif", size=11, color=DARK),
        legend=dict(orientation="h", yanchor="bottom", y=1.02,
                    xanchor="right", x=1),
        margin=dict(l=50, r=20, t=70, b=10),
        height=height, bargap=0.15, bargroupgap=0.05,
    )
    fig = go.Figure(layout=layout)

    for trace_name, raw_vals, trace_color in traces:
        # NaN -> None so plotly leaves a gap instead of drawing a bar.
        series = [float(v) if pd.notna(v) else None for v in raw_vals]
        fig.add_trace(go.Bar(
            name=trace_name, x=cat_labels, y=series,
            marker_color=trace_color,
        ))
    return fig
279
 
 
 
 
 
 
 
 
 
280
 
281
def make_hbar_grouped(categories, traces, title, height):
    """Horizontal grouped bar chart whose x-axis is locked to 0-1."""
    cat_labels = [str(cat) for cat in categories]

    layout = go.Layout(
        title=dict(text=title, x=0),
        barmode="group",
        xaxis=dict(
            range=[0, 1.05], autorange=False, fixedrange=True,
            title="Accuracy", gridcolor="#f0ece4", zeroline=False,
            dtick=0.2,
        ),
        yaxis=dict(automargin=True, fixedrange=True),
        plot_bgcolor="white", paper_bgcolor="white",
        font=dict(family="Inter, system-ui, sans-serif", size=11, color=DARK),
        legend=dict(orientation="h", yanchor="bottom", y=1.02,
                    xanchor="right", x=1),
        margin=dict(l=10, r=20, t=70, b=40),
        height=height, bargap=0.2, bargroupgap=0.05,
    )
    fig = go.Figure(layout=layout)

    for trace_name, raw_vals, trace_color in traces:
        # NaN -> None (gap in the chart); missing bars get no label text.
        series = [float(v) if pd.notna(v) else None for v in raw_vals]
        bar_texts = [f"{v:.2f}" if v is not None else "" for v in series]
        fig.add_trace(go.Bar(
            name=trace_name, y=cat_labels, x=series, orientation="h",
            marker_color=trace_color,
            text=bar_texts, textposition="inside",
            insidetextanchor="end", textfont=dict(size=10),
        ))
    return fig
312
 
313
+
314
def empty_fig(msg="No data for this selection."):
    """Blank placeholder figure with *msg* centred as an annotation."""
    note = dict(text=msg, xref="paper", yref="paper",
                x=0.5, y=0.5, showarrow=False,
                font=dict(size=16, color=MUTED))
    layout = go.Layout(
        xaxis=dict(visible=False), yaxis=dict(visible=False),
        plot_bgcolor="white", paper_bgcolor="white", height=300,
        annotations=[note],
    )
    return go.Figure(layout=layout)
323
 
324
 
325
+ # ── Tab logic ─────────────────────────────────────────────────────────────────
326
 
327
def fn_leaderboard(bench, scope, metric):
    """Leaderboard bar chart plus a ranked results table."""
    ranked = get_agg(bench, scope, metric)
    if ranked.empty:
        return empty_fig(), pd.DataFrame({"Info": ["No data."]})

    # Chart: ascending sort so the best model renders as the topmost bar.
    ranked = ranked.sort_values(metric, ascending=True).reset_index(drop=True)
    names = ranked["model"].tolist()
    scores = ranked[metric].tolist()
    bar_colors = [GOLD if "gherbal" in name else MUTED for name in names]
    title = f"{BNAME.get(bench, bench)} \u2014 {metric.replace('_',' ').title()}"
    chart_h = max(350, len(names) * 42 + 100)
    chart = make_hbar(names, scores, bar_colors, title, chart_h)

    # Table: descending sort with a 1-based rank index.
    table = ranked.sort_values(metric, ascending=False).reset_index(drop=True)
    table.index = table.index + 1
    if bench == "__average__":
        keep = ["model", metric]
    else:
        candidates = ["model", metric, "f1_weighted", "n_samples", "n_classes"]
        keep = [col for col in candidates if col in table.columns]
    shown = table[keep].copy()
    shown.columns = ["Model"] + [col.replace("_", " ").title() for col in keep[1:]]
    for col in shown.columns[1:]:
        if shown[col].dtype == float:
            shown[col] = shown[col].map(
                lambda v: f"{v:.4f}" if pd.notna(v) else "\u2014")
    return chart, shown
354
+
355
+
356
def fn_perlang(mdls, bench, scope, mins, topn):
    """Per-language chart (top-N languages) and pivoted accuracy table."""
    if not mdls:
        return empty_fig("Select at least one model."), pd.DataFrame()
    rows = get_lang(bench, scope, mdls)
    rows = rows[rows["n_samples"] >= mins]
    if rows.empty:
        return empty_fig(), pd.DataFrame()

    # Keep the N languages with the highest cross-model mean accuracy.
    by_lang = rows.groupby("language")["accuracy"].mean()
    top_langs = by_lang.sort_values(ascending=False).head(int(topn)).index.tolist()
    rows = rows[rows["language"].isin(top_langs)]
    cat_labels = [ld(code) for code in top_langs]

    traces = []
    for idx, model in enumerate(mdls):
        per_model = rows[rows["model"] == model].set_index("language")
        series = [per_model.loc[code, "accuracy"] if code in per_model.index
                  else None for code in top_langs]
        traces.append((model, series, COLORS[idx % len(COLORS)]))

    title = f"Per-Language Accuracy \u2014 {BNAME.get(bench, bench)}"
    chart = make_vbar(cat_labels, traces, title)

    # Table: one row per language, one accuracy column per model.
    pivot = rows.pivot_table(index="language", columns="model",
                             values="accuracy").reset_index()
    counts = rows.groupby("language")["n_samples"].first()
    pivot = pivot.merge(counts.reset_index(), on="language", how="left")
    sort_col = mdls[0] if mdls[0] in pivot.columns else pivot.columns[1]
    pivot = pivot.sort_values(sort_col, ascending=False, na_position="last")
    pivot.insert(1, "Name", pivot["language"].map(lambda c: LN.get(c, "")))
    pivot = pivot.rename(columns={"language": "Code", "n_samples": "Samples"})
    for col in pivot.columns:
        if col not in ("Code", "Name", "Samples") and pivot[col].dtype == float:
            pivot[col] = pivot[col].map(
                lambda v: f"{v:.4f}" if pd.notna(v) else "\u2014")
    return chart, pivot
393
+
394
+
395
def fn_region(lang_list, tab_name, mdls, bench, scope):
    """Chart + table restricted to a fixed regional list of languages."""
    if not mdls:
        return empty_fig("Select at least one model."), pd.DataFrame()
    rows = get_lang(bench, scope, mdls, lang_filter=lang_list)
    if rows.empty:
        return empty_fig("No data."), pd.DataFrame()

    # Ascending mean accuracy: hardest languages end up first.
    ordered = (rows.groupby("language")["accuracy"].mean()
               .sort_values(ascending=True).index.tolist())
    cat_labels = [ld(code) for code in ordered]

    traces = []
    for idx, model in enumerate(mdls):
        per_model = rows[rows["model"] == model].set_index("language")
        series = [per_model.loc[code, "accuracy"] if code in per_model.index
                  else None for code in ordered]
        traces.append((model, series, COLORS[idx % len(COLORS)]))

    title = f"{tab_name} \u2014 {BNAME.get(bench, bench)}"
    # Chart grows with the number of language rows and selected models.
    n_langs = len(ordered)
    chart_h = max(400, n_langs * 26 * max(1, len(mdls)) + 120)
    chart = make_hbar_grouped(cat_labels, traces, title, chart_h)

    # Table: one row per language, one accuracy column per model.
    pivot = rows.pivot_table(index="language", columns="model",
                             values="accuracy").reset_index()
    counts = rows.groupby("language")["n_samples"].first()
    pivot = pivot.merge(counts.reset_index(), on="language", how="left")
    sort_col = mdls[0] if mdls[0] in pivot.columns else pivot.columns[1]
    pivot = pivot.sort_values(sort_col, ascending=False, na_position="last")
    pivot.insert(1, "Name", pivot["language"].map(lambda c: LN.get(c, c)))
    pivot = pivot.rename(columns={"language": "Code", "n_samples": "Samples"})
    for col in pivot.columns:
        if col not in ("Code", "Name", "Samples") and pivot[col].dtype == float:
            pivot[col] = pivot[col].map(
                lambda v: f"{v:.4f}" if pd.notna(v) else "\u2014")
    return chart, pivot
431
+
432
+
433
def fn_confusions(model, bench, scope, mins):
    """Table of the most-confused language pairs for one model/benchmark.

    Rows are languages (sorted worst accuracy first) with their top three
    confusion targets and counts. The averaged pseudo-benchmark has no
    confusion columns, so it is rejected up front.
    """
    if bench == "__average__":
        return pd.DataFrame({"Info": ["Pick a specific benchmark."]})
    # Keep only rows for this model/benchmark/scope with enough samples
    # and at least one recorded confusion target.
    mask = (
        (df_lang["model"] == model) & (df_lang["benchmark"] == bench)
        & (df_lang["scope"] == scope) & (df_lang["n_samples"] >= mins)
        & (df_lang["top_confusion_1"].notna())
    )
    out = df_lang[mask][
        ["language", "accuracy", "n_samples",
         "top_confusion_1", "top_confusion_1_count",
         "top_confusion_2", "top_confusion_2_count",
         "top_confusion_3", "top_confusion_3_count"]
    ].copy()
    out = out.sort_values("accuracy", ascending=True)  # worst first
    out.insert(1, "Name", out["language"].map(lambda c: LN.get(c, "")))
    out["accuracy"] = out["accuracy"].map(lambda v: f"{v:.4f}")
    # NOTE(review): the repeated "Count " headers presumably differ only in
    # trailing whitespace to keep display names unique — confirm in app.py.
    out.columns = [
        "Code", "Name", "Accuracy", "Samples",
        "Top Confusion", "Count",
        "2nd Confusion", "Count ",
        "3rd Confusion", "Count ",
    ]
    return out
457
 
458
 
459
def fn_compare(mdls, scope):
    """Radar chart of accuracy over all benchmarks for the chosen models.

    Fix: a NaN accuracy cell (some model/benchmark pairs have no score)
    previously flowed straight through float(...) into the radar trace.
    NaN now falls back to 0.0, the same value already used for a missing
    row, so the filled polygon stays closed.
    """
    if not mdls:
        return empty_fig("Select at least one model.")
    labels = [BNAME.get(b, b) for b in BENCHMARKS]

    fig = go.Figure(layout=go.Layout(
        polar=dict(
            radialaxis=dict(visible=True, range=[0, 1], gridcolor="#f0ece4"),
            bgcolor="white",
        ),
        title=dict(text="Model Comparison \u2014 All 8 Benchmarks", x=0),
        plot_bgcolor="white", paper_bgcolor="white",
        font=dict(family="Inter, system-ui, sans-serif", size=12, color=DARK),
        margin=dict(l=80, r=80, t=60, b=60), height=550,
        legend=dict(orientation="h", yanchor="bottom", y=-0.2,
                    xanchor="center", x=0.5),
    ))

    for i, m in enumerate(mdls):
        vals = []
        for b in BENCHMARKS:
            if scope == "full":
                r = df_sum[(df_sum["model"] == m) & (df_sum["benchmark"] == b)]
            else:
                r = df_agg[(df_agg["model"] == m) & (df_agg["benchmark"] == b)
                           & (df_agg["scope"] == scope)]
            # Guard both "no row" and "row with NaN accuracy"; either would
            # otherwise punch a hole in the radar polygon.
            acc = r["accuracy"].values[0] if len(r) > 0 else None
            vals.append(float(acc) if acc is not None and pd.notna(acc) else 0.0)
        vals.append(vals[0])  # repeat the first point to close the loop

        fig.add_trace(go.Scatterpolar(
            r=vals, theta=labels + [labels[0]],
            fill="toself", name=m,
            line_color=COLORS[i % len(COLORS)],
            fillcolor=COLORS[i % len(COLORS)], opacity=0.3,
        ))
    return fig
494
 
495
 
496
+ # ── CSS & Theme ───────────────────────────────────────────────────────────────
497
 
498
# Global stylesheet injected into the Gradio page: widens the container,
# styles the dark header banner with gold accents, and hides the default
# Gradio footer. Kept as one concatenated string literal.
CSS = (
    ".gradio-container{max-width:1200px!important;"
    "font-family:'Inter',system-ui,-apple-system,sans-serif!important}"
    ".header-banner{background:linear-gradient(135deg,#2A2520 0%,#3A3530 100%);"
    "border-radius:12px;padding:28px 32px;margin-bottom:16px;color:white}"
    ".header-banner h1{color:#E8C96A!important;font-size:1.6em!important;"
    "margin-bottom:4px!important;font-weight:700!important}"
    ".header-banner p{color:#B8B0A8!important;font-size:.95em!important;margin:0!important}"
    ".header-banner a{color:#E8C96A!important;text-decoration:none}"
    ".header-banner a:hover{text-decoration:underline}"
    "footer{display:none!important}"
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
 
511
# Gradio theme: gold primary palette and warm-grey neutrals matching the
# chart colors used elsewhere in the app.
theme = gr.themes.Soft(
    # Gold/amber ramp (light -> dark).
    primary_hue=gr.themes.Color(
        c50="#FDF8ED", c100="#F8EDCC", c200="#F0DCA0", c300="#E8C96A",
        c400="#D4AD3C", c500="#C4962C", c600="#A07A20", c700="#7C5E18",
        c800="#584210", c900="#3A2C0A", c950="#2A2008"),
    # Warm neutral ramp (light -> dark).
    neutral_hue=gr.themes.Color(
        c50="#FAF8F5", c100="#F0ECE4", c200="#E0D8CC", c300="#C8BEB0",
        c400="#A89E90", c500="#8A8078", c600="#6A6258", c700="#4A4440",
        c800="#2A2520", c900="#1A1815", c950="#0A0908"),
    font=["Inter", "system-ui", "sans-serif"],
)
522
 
523
+ # ── App ───────────────────────────────────────────────────────────────────────
524
+
525
# Top-level Gradio app: seven tabs, each wired so that any control change
# re-runs its render function, plus a demo.load() to render on page open.
with gr.Blocks(theme=theme, css=CSS, title="LID Benchmark") as demo:

    # Header banner (styled by .header-banner rules in CSS).
    gr.HTML(
        '<div class="header-banner">'
        '<h1>\U0001f30d LID Benchmark</h1>'
        '<p>10 models \u00b7 8 benchmarks \u00b7 380 languages \u00b7 '
        'Built by <a href="https://www.omneitylabs.com" target="_blank">'
        'Omneity Labs</a> \u00b7 Model: '
        '<a href="https://www.omneitylabs.com/models/gherbal" '
        'target="_blank">Gherbal</a></p></div>'
    )

    with gr.Tabs():

        # ── Leaderboard ──────────────────────────────────────────
        with gr.Tab("Leaderboard"):
            gr.Markdown("Compare all models. *Average* = mean across all 8 benchmarks.")
            with gr.Row():
                lb_b = gr.Dropdown(BENCH_ALL, value="__average__",
                                   label="Benchmark", scale=2)
                lb_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
                lb_m = gr.Dropdown(METRICS, value="accuracy",
                                   label="Metric", scale=1)
            lb_c = gr.Plot()
            lb_t = gr.Dataframe(label="Results")
            # Any control change re-renders; load renders the initial view.
            for w in [lb_b, lb_s, lb_m]:
                w.change(fn_leaderboard, [lb_b, lb_s, lb_m], [lb_c, lb_t])
            demo.load(fn_leaderboard, [lb_b, lb_s, lb_m], [lb_c, lb_t])

        # ── Per-Language ─────────────────────────────────────────
        with gr.Tab("Per-Language"):
            gr.Markdown("Per-language accuracy across models.")
            with gr.Row():
                pl_mod = gr.Dropdown(MODELS,
                                     value=["gherbal-v4", "glotlid", "openlid-v2"],
                                     multiselect=True, label="Models", scale=3)
                pl_b = gr.Dropdown(BENCH_ALL, value="__average__",
                                   label="Benchmark", scale=2)
            with gr.Row():
                pl_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
                pl_min = gr.Slider(1, 1000, 50, step=10,
                                   label="Min samples", scale=1)
                pl_top = gr.Slider(5, 50, 20, step=5,
                                   label="Top N (chart)", scale=1)
            pl_c = gr.Plot()
            pl_t = gr.Dataframe(label="Per-Language Accuracy")
            for w in [pl_mod, pl_b, pl_s, pl_min, pl_top]:
                w.change(fn_perlang, [pl_mod, pl_b, pl_s, pl_min, pl_top],
                         [pl_c, pl_t])
            demo.load(fn_perlang, [pl_mod, pl_b, pl_s, pl_min, pl_top],
                      [pl_c, pl_t])

        # ── Arabic Dialects ──────────────────────────────────────
        with gr.Tab("Arabic Dialects"):
            gr.Markdown("All **16 Arabic dialect** variants.")
            with gr.Row():
                ad_m = gr.Dropdown(MODELS,
                                   value=["gherbal-v4", "glotlid", "openlid-v2", "nllb-lid"],
                                   multiselect=True, label="Models", scale=3)
                ad_b = gr.Dropdown(BENCH_ALL, value="__average__",
                                   label="Benchmark", scale=2)
                ad_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
            ad_c = gr.Plot()
            ad_t = gr.Dataframe(label="Arabic Dialect Results")

            # Thin wrapper binding the fixed language list for this tab.
            def _ad(m, b, s):
                return fn_region(ARABIC_16, "Arabic Dialects", m, b, s)
            for w in [ad_m, ad_b, ad_s]:
                w.change(_ad, [ad_m, ad_b, ad_s], [ad_c, ad_t])
            demo.load(_ad, [ad_m, ad_b, ad_s], [ad_c, ad_t])

        # ── North African ────────────────────────────────────────
        with gr.Tab("North African"):
            gr.Markdown("**Maghreb Arabic**, **Hassaniya**, **Amazigh/Berber**.")
            with gr.Row():
                na_m = gr.Dropdown(MODELS,
                                   value=["gherbal-v4", "glotlid", "openlid-v2", "nllb-lid"],
                                   multiselect=True, label="Models", scale=3)
                na_b = gr.Dropdown(BENCH_ALL, value="__average__",
                                   label="Benchmark", scale=2)
                na_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
            na_c = gr.Plot()
            na_t = gr.Dataframe(label="North African Results")

            # Thin wrapper binding the fixed language list for this tab.
            def _na(m, b, s):
                return fn_region(NORTH_AFRICAN, "North African", m, b, s)
            for w in [na_m, na_b, na_s]:
                w.change(_na, [na_m, na_b, na_s], [na_c, na_t])
            demo.load(_na, [na_m, na_b, na_s], [na_c, na_t])

        # ── African Languages ────────────────────────────────────
        with gr.Tab("African Languages"):
            gr.Markdown("Sub-Saharan, North African Arabic & Amazigh.")
            with gr.Row():
                af_m = gr.Dropdown(MODELS,
                                   value=["gherbal-v4", "glotlid", "openlid-v2", "nllb-lid"],
                                   multiselect=True, label="Models", scale=3)
                af_b = gr.Dropdown(BENCH_ALL, value="__average__",
                                   label="Benchmark", scale=2)
                af_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
            af_c = gr.Plot()
            af_t = gr.Dataframe(label="African Language Results")

            # Thin wrapper binding the fixed language list for this tab.
            def _af(m, b, s):
                return fn_region(AFRICAN, "African Languages", m, b, s)
            for w in [af_m, af_b, af_s]:
                w.change(_af, [af_m, af_b, af_s], [af_c, af_t])
            demo.load(_af, [af_m, af_b, af_s], [af_c, af_t])

        # ── Confusions ───────────────────────────────────────────
        with gr.Tab("Confusions"):
            gr.Markdown("Most confused language pairs. Sorted worst-first.")
            with gr.Row():
                cf_m = gr.Dropdown(MODELS, value="gherbal-v4",
                                   label="Model", scale=2)
                # BENCHMARKS (no "__average__"): confusions need one benchmark.
                cf_b = gr.Dropdown(BENCHMARKS, value="flores-devtest",
                                   label="Benchmark", scale=2)
                cf_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
                cf_min = gr.Slider(1, 500, 50, step=10,
                                   label="Min samples", scale=1)
            cf_t = gr.Dataframe(label="Confusion Pairs")
            for w in [cf_m, cf_b, cf_s, cf_min]:
                w.change(fn_confusions, [cf_m, cf_b, cf_s, cf_min], cf_t)
            demo.load(fn_confusions, [cf_m, cf_b, cf_s, cf_min], cf_t)

        # ── Compare ──────────────────────────────────────────────
        with gr.Tab("Compare"):
            gr.Markdown("Radar chart across **all 8 benchmarks**.")
            with gr.Row():
                mc_m = gr.Dropdown(MODELS,
                                   value=["gherbal-v4", "glotlid", "openlid-v2", "nllb-lid"],
                                   multiselect=True, label="Models", scale=3)
                mc_s = gr.Dropdown(SCOPES, value="v4", label="Scope", scale=1)
            mc_c = gr.Plot()

            for w in [mc_m, mc_s]:
                w.change(fn_compare, [mc_m, mc_s], mc_c)
            demo.load(fn_compare, [mc_m, mc_s], mc_c)

    # Footer with dataset / model / author links.
    gr.HTML(
        '<div style="text-align:center;padding:16px 0 8px;color:#8A8078;'
        'font-size:.85em;">'
        'Data: <a href="https://huggingface.co/datasets/omneity-labs/'
        'lid-benchmark" style="color:#C4962C;">omneity-labs/lid-benchmark</a>'
        ' \u00b7 Model: <a href="https://www.omneitylabs.com/models/gherbal"'
        ' style="color:#C4962C;">Gherbal</a>'
        ' \u00b7 <a href="https://omarkamali.com" style="color:#C4962C;">'
        'Omar Kamali</a>'
        ' \u00b7 <a href="https://www.omneitylabs.com" style="color:#C4962C;">'
        'Omneity Labs</a></div>'
    )
 
 
 
 
 
 
 
 
 
676
 
677
if __name__ == "__main__":
    # NOTE(review): SSR is disabled explicitly — presumably to work around
    # server-side-rendering startup issues in the hosting container; confirm.
    print("[startup] launching (SSR off)...", flush=True)
    # Bind on all interfaces so the app is reachable inside the container.
    demo.launch(ssr_mode=False, server_name="0.0.0.0")