bruAristimunha committed on
Commit
5763bf2
·
1 Parent(s): ee472e7

Align frontend and backend with adapter_finetuning paper

Browse files

Replace all LLM-specific references with real EEG benchmark data:
- 8 EEG datasets (BCIC-2a, PhysioNet, ISRUC, TUAB, TUEV, CHB-MIT, FACED, SEED-V)
- 7 adapter methods (LoRA, IA3, AdaLoRA, DoRA, OFT, Probe, Full Fine-tune)
- 7 foundation models (LaBraM, EEGPT, BIOT, BENDR, SignalJEPA, CBraMod, REVE)
- Remove official providers, MoE, flagged, merged features
- Update params from billions to millions scale
- Add real citations and dataset descriptions from the paper

Files changed (19) hide show
  1. backend/app/main.py +1 -1
  2. backend/app/services/leaderboard.py +96 -42
  3. frontend/src/pages/AboutPage/AboutPage.js +217 -35
  4. frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js +10 -7
  5. frontend/src/pages/AddModelPage/components/SubmissionGuide/SubmissionGuide.js +23 -18
  6. frontend/src/pages/LeaderboardPage/components/Leaderboard/Leaderboard.js +1 -2
  7. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/FilteredModelCount.js +4 -32
  8. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/Filters.js +237 -406
  9. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/QuickFilters.js +5 -55
  10. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/SearchBar.js +2 -5
  11. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/useOfficialProvidersMode.js +4 -125
  12. frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useDataProcessing.js +2 -6
  13. frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/defaults.js +70 -17
  14. frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/modelTypes.js +46 -21
  15. frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/tooltips.js +134 -59
  16. frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useDataUtils.js +5 -27
  17. frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData.js +1 -4
  18. frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/columnUtils.js +80 -110
  19. frontend/src/pages/QuotePage/QuotePage.js +125 -21
backend/app/main.py CHANGED
@@ -6,7 +6,7 @@ import logging
6
  setup_logging()
7
  logger = logging.getLogger(__name__)
8
 
9
- app = FastAPI(title="Open LLM Leaderboard API")
10
 
11
  @app.on_event("startup")
12
  async def startup_event():
 
6
  setup_logging()
7
  logger = logging.getLogger(__name__)
8
 
9
+ app = FastAPI(title="EEG Finetune Arena API")
10
 
11
  @app.on_event("startup")
12
  async def startup_event():
backend/app/services/leaderboard.py CHANGED
@@ -96,27 +96,71 @@ class LeaderboardService:
96
  raise HTTPException(status_code=500, detail=str(e))
97
 
98
  async def transform_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
99
- """Transform raw data into the format expected by the frontend"""
 
 
 
 
 
 
 
 
 
100
  try:
101
- # Extract model name for logging
102
  model_name = data.get("fullname", "Unknown")
103
  logger.debug(LogFormatter.info(f"Transforming data for model: {model_name}"))
104
 
105
- # Create unique ID combining model name, precision and sha
106
- unique_id = f"{data.get('fullname', 'Unknown')}_{data.get('Precision', 'Unknown')}_{data.get('Model sha', 'Unknown')}"
 
 
 
 
 
107
 
108
- # EEG benchmark evaluations (placeholders matching current dataset schema)
 
109
  evaluations = {
110
- "anli": {
111
- "name": "ANLI",
112
- "value": data.get("ANLI Raw", 0),
113
- "normalized_score": data.get("ANLI", 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  },
115
- "logiqa": {
116
- "name": "LogiQA",
117
- "value": data.get("LogiQA Raw", 0),
118
- "normalized_score": data.get("LogiQA", 0)
119
- }
120
  }
121
 
122
  features = {
@@ -126,39 +170,49 @@ class LeaderboardService:
126
  metadata = {
127
  "upload_date": data.get("Upload To Hub Date"),
128
  "submission_date": data.get("Submission Date"),
129
- "generation": data.get("Generation"),
130
  "base_model": data.get("Base Model"),
131
  "hub_license": data.get("Hub License"),
132
- "hub_hearts": data.get("Hub ❤️"),
133
  "params_millions": data.get("#Params (M)"),
 
 
 
134
  }
135
 
136
- # Clean model type by removing emojis if present
137
- original_type = data.get("Type", "")
138
- model_type = original_type.lower().strip()
139
-
140
- # Remove emojis and parentheses
141
- if "(" in model_type:
142
- model_type = model_type.split("(")[0].strip()
143
- model_type = ''.join(c for c in model_type if not c in '🟢🔶🧪🏗️ ')
144
-
145
- # Map model types for EEG domain
146
- model_type_mapping = {
147
- "fine-tuned": "fine-tuned",
148
- "fine tuned": "fine-tuned",
149
- "finetuned": "fine-tuned",
150
- "fine_tuned": "fine-tuned",
151
- "ft": "fine-tuned",
152
- "pretrained": "pretrained",
153
- "pre-trained": "pretrained",
154
- "task-specific": "task-specific",
155
- "foundation": "foundation",
156
  }
157
 
158
- mapped_type = model_type_mapping.get(model_type.lower().strip(), model_type)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
 
160
- if mapped_type != model_type:
161
- logger.debug(LogFormatter.info(f"Model type mapped: {original_type} -> {mapped_type}"))
162
 
163
  transformed_data = {
164
  "id": unique_id,
@@ -168,12 +222,12 @@ class LeaderboardService:
168
  "precision": data.get("Precision"),
169
  "type": mapped_type,
170
  "weight_type": data.get("Weight type"),
171
- "architecture": data.get("Architecture"),
172
- "average_score": data.get("Average ⬆️"),
173
  },
174
  "evaluations": evaluations,
175
  "features": features,
176
- "metadata": metadata
177
  }
178
 
179
  logger.debug(LogFormatter.success(f"Successfully transformed data for {model_name}"))
 
96
  raise HTTPException(status_code=500, detail=str(e))
97
 
98
  async def transform_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
99
+ """Transform raw data into the format expected by the frontend.
100
+
101
+ Evaluations correspond to EEG downstream datasets used in the
102
+ Parameter-Efficient Fine-Tuning benchmark for EEG Foundation Models:
103
+ - Motor Imagery: BCIC-2a (4-class), PhysioNet MI (4-class)
104
+ - Sleep Staging: ISRUC-SLEEP (5-class)
105
+ - Pathology Detection: TUAB (binary), TUEV (6-class)
106
+ - Seizure Detection: CHB-MIT (binary)
107
+ - Emotion Recognition: FACED (9-class), SEED-V (5-class)
108
+ """
109
  try:
 
110
  model_name = data.get("fullname", "Unknown")
111
  logger.debug(LogFormatter.info(f"Transforming data for model: {model_name}"))
112
 
113
+ # Create unique ID combining model name, adapter method and dataset
114
+ unique_id = (
115
+ f"{data.get('fullname', 'Unknown')}"
116
+ f"_{data.get('adapter', 'Unknown')}"
117
+ f"_{data.get('Precision', 'Unknown')}"
118
+ f"_{data.get('Model sha', 'Unknown')}"
119
+ )
120
 
121
+ # EEG benchmark evaluations: each dataset is a separate evaluation
122
+ # Scores are accuracy (0-1 raw) and normalized to 0-100 scale
123
  evaluations = {
124
+ "bcic2a": {
125
+ "name": "BCIC-2a",
126
+ "value": data.get("bcic2a_accuracy", 0),
127
+ "normalized_score": data.get("bcic2a_accuracy", 0) * 100 if data.get("bcic2a_accuracy") else 0,
128
+ },
129
+ "physionet": {
130
+ "name": "PhysioNet MI",
131
+ "value": data.get("physionet_accuracy", 0),
132
+ "normalized_score": data.get("physionet_accuracy", 0) * 100 if data.get("physionet_accuracy") else 0,
133
+ },
134
+ "isruc_sleep": {
135
+ "name": "ISRUC-SLEEP",
136
+ "value": data.get("isruc_sleep_accuracy", 0),
137
+ "normalized_score": data.get("isruc_sleep_accuracy", 0) * 100 if data.get("isruc_sleep_accuracy") else 0,
138
+ },
139
+ "tuab": {
140
+ "name": "TUAB",
141
+ "value": data.get("tuab_accuracy", 0),
142
+ "normalized_score": data.get("tuab_accuracy", 0) * 100 if data.get("tuab_accuracy") else 0,
143
+ },
144
+ "tuev": {
145
+ "name": "TUEV",
146
+ "value": data.get("tuev_accuracy", 0),
147
+ "normalized_score": data.get("tuev_accuracy", 0) * 100 if data.get("tuev_accuracy") else 0,
148
+ },
149
+ "chbmit": {
150
+ "name": "CHB-MIT",
151
+ "value": data.get("chbmit_accuracy", 0),
152
+ "normalized_score": data.get("chbmit_accuracy", 0) * 100 if data.get("chbmit_accuracy") else 0,
153
+ },
154
+ "faced": {
155
+ "name": "FACED",
156
+ "value": data.get("faced_accuracy", 0),
157
+ "normalized_score": data.get("faced_accuracy", 0) * 100 if data.get("faced_accuracy") else 0,
158
+ },
159
+ "seedv": {
160
+ "name": "SEED-V",
161
+ "value": data.get("seedv_accuracy", 0),
162
+ "normalized_score": data.get("seedv_accuracy", 0) * 100 if data.get("seedv_accuracy") else 0,
163
  },
 
 
 
 
 
164
  }
165
 
166
  features = {
 
170
  metadata = {
171
  "upload_date": data.get("Upload To Hub Date"),
172
  "submission_date": data.get("Submission Date"),
 
173
  "base_model": data.get("Base Model"),
174
  "hub_license": data.get("Hub License"),
175
+ "hub_hearts": data.get("Hub \u2764\uFE0F"),
176
  "params_millions": data.get("#Params (M)"),
177
+ "adapter_method": data.get("adapter"),
178
+ "embed_dim": data.get("embed_dim"),
179
+ "trainable_params": data.get("trainable_params"),
180
  }
181
 
182
+ # Adapter method / fine-tuning approach determines the model "type"
183
+ original_adapter = data.get("adapter", "")
184
+ adapter_lower = original_adapter.lower().strip() if original_adapter else ""
185
+
186
+ adapter_type_mapping = {
187
+ "lora": "lora",
188
+ "ia3": "ia3",
189
+ "adalora": "adalora",
190
+ "dora": "dora",
191
+ "oft": "oft",
192
+ "probe": "probe",
193
+ "full_finetune": "full_finetune",
194
+ "full": "full_finetune",
 
 
 
 
 
 
 
195
  }
196
 
197
+ mapped_type = adapter_type_mapping.get(adapter_lower, adapter_lower)
198
+
199
+ # Architecture is the foundation model name
200
+ original_arch = data.get("Architecture", "") or data.get("model", "")
201
+ arch_lower = original_arch.lower().strip() if original_arch else ""
202
+
203
+ arch_mapping = {
204
+ "labram": "LaBraM",
205
+ "labram_small": "LaBraM-Small",
206
+ "eegpt": "EEGPT",
207
+ "biot": "BIOT",
208
+ "bendr": "BENDR",
209
+ "signal_jepa": "SignalJEPA",
210
+ "signaljepa": "SignalJEPA",
211
+ "cbramod": "CBraMod",
212
+ "reve": "REVE",
213
+ }
214
 
215
+ architecture = arch_mapping.get(arch_lower, original_arch)
 
216
 
217
  transformed_data = {
218
  "id": unique_id,
 
222
  "precision": data.get("Precision"),
223
  "type": mapped_type,
224
  "weight_type": data.get("Weight type"),
225
+ "architecture": architecture,
226
+ "average_score": data.get("Average \u2B06\uFE0F"),
227
  },
228
  "evaluations": evaluations,
229
  "features": features,
230
+ "metadata": metadata,
231
  }
232
 
233
  logger.debug(LogFormatter.success(f"Successfully transformed data for {model_name}"))
frontend/src/pages/AboutPage/AboutPage.js CHANGED
@@ -7,7 +7,7 @@ function AboutPage() {
7
  <Box sx={{ width: "100%", maxWidth: 1200, margin: "0 auto", py: 4, px: 0 }}>
8
  <PageHeader
9
  title="About the EEG Finetune Arena"
10
- subtitle="Understanding our benchmarks and methodology"
11
  />
12
 
13
  <Paper
@@ -24,24 +24,40 @@ function AboutPage() {
24
  What is the EEG Finetune Arena?
25
  </Typography>
26
  <Typography variant="body1" color="text.secondary" paragraph>
27
- The EEG Finetune Arena is an open leaderboard for evaluating and
28
- comparing EEG (electroencephalography) models. We provide a
29
- standardized evaluation pipeline to assess how well different models
30
- perform on EEG-related tasks, enabling fair and reproducible
31
- comparisons across the community.
 
32
  </Typography>
33
  <Typography variant="body1" color="text.secondary" paragraph>
34
- Built on top of the{" "}
35
  <Link
36
  href="https://braindecode.org"
37
  target="_blank"
38
  rel="noopener noreferrer"
39
  >
40
  braindecode
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  </Link>{" "}
42
- library, the arena supports a variety of model architectures including
43
- pretrained models, fine-tuned models, task-specific models, and
44
- foundation models.
45
  </Typography>
46
  </Paper>
47
 
@@ -56,31 +72,117 @@ function AboutPage() {
56
  }}
57
  >
58
  <Typography variant="h5" sx={{ mb: 2 }}>
59
- Benchmarks
60
- </Typography>
61
- <Typography variant="body1" color="text.secondary" paragraph>
62
- Models are currently evaluated on the following benchmarks:
63
  </Typography>
64
  <Box component="ul" sx={{ pl: 3 }}>
65
  <li>
66
  <Typography variant="body1" color="text.secondary" paragraph>
67
- <strong>ANLI (Adversarial Natural Language Inference)</strong>:
68
- Tests the model's ability to perform natural language inference on
69
- adversarially constructed examples, evaluating robustness and
70
- reasoning capabilities.
 
 
 
 
 
71
  </Typography>
72
  </li>
73
  <li>
74
  <Typography variant="body1" color="text.secondary" paragraph>
75
- <strong>LogiQA (Logical Reasoning QA)</strong>: Evaluates logical
76
- reasoning abilities through multiple-choice questions covering
77
- categorical, conditional, disjunctive, and conjunctive reasoning.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  </Typography>
79
  </li>
80
  </Box>
81
- <Typography variant="body2" color="text.secondary">
82
- Additional EEG-specific benchmarks will be added as the arena evolves.
 
 
 
 
 
 
 
 
 
 
 
 
83
  </Typography>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  </Paper>
85
 
86
  <Paper
@@ -94,32 +196,88 @@ function AboutPage() {
94
  }}
95
  >
96
  <Typography variant="h5" sx={{ mb: 2 }}>
97
- Model Types
 
 
 
 
 
 
 
 
98
  </Typography>
99
  <Box component="ul" sx={{ pl: 3 }}>
 
 
 
 
 
 
100
  <li>
101
  <Typography variant="body1" color="text.secondary" paragraph>
102
- <strong>{"\u{1F7E2}"} Pretrained</strong>: Base EEG models trained
103
- with self-supervised learning on raw EEG data.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  </Typography>
105
  </li>
106
  <li>
107
  <Typography variant="body1" color="text.secondary" paragraph>
108
- <strong>{"\u{1F536}"} Fine-tuned</strong>: Models fine-tuned on
109
- specific EEG datasets for particular downstream tasks.
 
110
  </Typography>
111
  </li>
 
 
 
 
 
 
112
  <li>
113
  <Typography variant="body1" color="text.secondary" paragraph>
114
- <strong>{"\u{1F9EA}"} Task-specific</strong>: Models designed for
115
- specific EEG tasks such as sleep staging, motor imagery, or seizure
116
- detection.
 
 
 
 
 
 
 
 
 
 
 
117
  </Typography>
118
  </li>
119
  <li>
120
  <Typography variant="body1" color="text.secondary" paragraph>
121
- <strong>{"\u{1F3D7}\u{FE0F}"} Foundation</strong>: Large-scale EEG
122
- foundation models trained on diverse EEG datasets.
123
  </Typography>
124
  </li>
125
  </Box>
@@ -145,9 +303,33 @@ function AboutPage() {
145
  target="_blank"
146
  rel="noopener noreferrer"
147
  >
148
- Braindecode Documentation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  </Link>{" "}
150
- - The deep learning toolbox for EEG decoding
151
  </Typography>
152
  </li>
153
  <li>
 
7
  <Box sx={{ width: "100%", maxWidth: 1200, margin: "0 auto", py: 4, px: 0 }}>
8
  <PageHeader
9
  title="About the EEG Finetune Arena"
10
+ subtitle="Parameter-Efficient Fine-Tuning Benchmark for EEG Foundation Models"
11
  />
12
 
13
  <Paper
 
24
  What is the EEG Finetune Arena?
25
  </Typography>
26
  <Typography variant="body1" color="text.secondary" paragraph>
27
+ The EEG Finetune Arena is an open leaderboard for comparing
28
+ parameter-efficient fine-tuning (PEFT) methods applied to EEG
29
+ foundation models. We provide a standardized evaluation pipeline
30
+ across diverse EEG downstream tasks, enabling fair and reproducible
31
+ comparisons of how well different adapter methods can adapt
32
+ pre-trained EEG models.
33
  </Typography>
34
  <Typography variant="body1" color="text.secondary" paragraph>
35
+ Built on top of{" "}
36
  <Link
37
  href="https://braindecode.org"
38
  target="_blank"
39
  rel="noopener noreferrer"
40
  >
41
  braindecode
42
+ </Link>
43
+ ,{" "}
44
+ <Link
45
+ href="https://moabb.neurotechx.com"
46
+ target="_blank"
47
+ rel="noopener noreferrer"
48
+ >
49
+ MOABB
50
+ </Link>
51
+ , and the{" "}
52
+ <Link
53
+ href="https://huggingface.co/docs/peft"
54
+ target="_blank"
55
+ rel="noopener noreferrer"
56
+ >
57
+ HuggingFace PEFT
58
  </Link>{" "}
59
+ library, the arena evaluates combinations of 7 foundation models with
60
+ 7 adapter methods across 14 EEG datasets.
 
61
  </Typography>
62
  </Paper>
63
 
 
72
  }}
73
  >
74
  <Typography variant="h5" sx={{ mb: 2 }}>
75
+ Foundation Models
 
 
 
76
  </Typography>
77
  <Box component="ul" sx={{ pl: 3 }}>
78
  <li>
79
  <Typography variant="body1" color="text.secondary" paragraph>
80
+ <strong>LaBraM</strong> - Vision Transformer for EEG with neural
81
+ tokenization (12 layers, 200D embedding). Pre-trained on large-scale
82
+ EEG data.
83
+ </Typography>
84
+ </li>
85
+ <li>
86
+ <Typography variant="body1" color="text.secondary" paragraph>
87
+ <strong>EEGPT</strong> - Transformer with patch-based EEG
88
+ tokenization (~10M params, 8 layers, 512D embedding).
89
  </Typography>
90
  </li>
91
  <li>
92
  <Typography variant="body1" color="text.secondary" paragraph>
93
+ <strong>BIOT</strong> - Linear Attention Transformer for efficient
94
+ EEG processing (4 layers, 256D embedding).
95
+ </Typography>
96
+ </li>
97
+ <li>
98
+ <Typography variant="body1" color="text.secondary" paragraph>
99
+ <strong>BENDR</strong> - CNN + BERT-inspired Transformer encoder
100
+ (8 layers, 512D).
101
+ </Typography>
102
+ </li>
103
+ <li>
104
+ <Typography variant="body1" color="text.secondary" paragraph>
105
+ <strong>SignalJEPA</strong> - CNN + Transformer with JEPA-style
106
+ predictive self-supervised learning.
107
+ </Typography>
108
+ </li>
109
+ <li>
110
+ <Typography variant="body1" color="text.secondary" paragraph>
111
+ <strong>CBraMod</strong> - Criss-Cross Transformer with separate
112
+ spatial and temporal attention (~4M params, 12 layers, 200D).
113
+ </Typography>
114
+ </li>
115
+ <li>
116
+ <Typography variant="body1" color="text.secondary" paragraph>
117
+ <strong>REVE</strong> - Vision Transformer with GEGLU and visual
118
+ encoding (22 layers, 512D embedding).
119
  </Typography>
120
  </li>
121
  </Box>
122
+ </Paper>
123
+
124
+ <Paper
125
+ elevation={0}
126
+ sx={{
127
+ p: 4,
128
+ mb: 4,
129
+ border: "1px solid",
130
+ borderColor: "grey.200",
131
+ borderRadius: 2,
132
+ }}
133
+ >
134
+ <Typography variant="h5" sx={{ mb: 2 }}>
135
+ Adapter Methods
136
  </Typography>
137
+ <Box component="ul" sx={{ pl: 3 }}>
138
+ <li>
139
+ <Typography variant="body1" color="text.secondary" paragraph>
140
+ <strong>{"\u{1F9E9}"} LoRA</strong> - Low-Rank Adaptation (r=16,
141
+ alpha=32): injects trainable low-rank matrices, ~98% parameter
142
+ reduction.
143
+ </Typography>
144
+ </li>
145
+ <li>
146
+ <Typography variant="body1" color="text.secondary" paragraph>
147
+ <strong>{"\u{1F4A1}"} IA3</strong> - Infused Adapter by Inhibiting
148
+ and Amplifying Inner Activations: only learns scaling vectors,
149
+ ~99.5% parameter reduction.
150
+ </Typography>
151
+ </li>
152
+ <li>
153
+ <Typography variant="body1" color="text.secondary" paragraph>
154
+ <strong>{"\u{1F3AF}"} AdaLoRA</strong> - Adaptive Low-Rank
155
+ Adaptation: dynamic rank allocation across layers for optimal
156
+ budget distribution.
157
+ </Typography>
158
+ </li>
159
+ <li>
160
+ <Typography variant="body1" color="text.secondary" paragraph>
161
+ <strong>{"\u{1F52C}"} DoRA</strong> - Weight-Decomposed Low-Rank
162
+ Adaptation: decomposes weights into magnitude and direction
163
+ components.
164
+ </Typography>
165
+ </li>
166
+ <li>
167
+ <Typography variant="body1" color="text.secondary" paragraph>
168
+ <strong>{"\u{1F504}"} OFT</strong> - Orthogonal Fine-Tuning:
169
+ applies orthogonal transformations to preserve pre-trained
170
+ features.
171
+ </Typography>
172
+ </li>
173
+ <li>
174
+ <Typography variant="body1" color="text.secondary" paragraph>
175
+ <strong>{"\u{1F50D}"} Probe</strong> - Linear probing baseline:
176
+ freezes the encoder and trains only the classification head.
177
+ </Typography>
178
+ </li>
179
+ <li>
180
+ <Typography variant="body1" color="text.secondary" paragraph>
181
+ <strong>{"\u{1F527}"} Full Fine-tune</strong> - Updates all model
182
+ parameters (baseline for comparison).
183
+ </Typography>
184
+ </li>
185
+ </Box>
186
  </Paper>
187
 
188
  <Paper
 
196
  }}
197
  >
198
  <Typography variant="h5" sx={{ mb: 2 }}>
199
+ EEG Benchmarks
200
+ </Typography>
201
+ <Typography variant="body1" color="text.secondary" paragraph>
202
+ Models are evaluated across 8 primary downstream datasets spanning
203
+ diverse EEG tasks:
204
+ </Typography>
205
+
206
+ <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
207
+ Motor Imagery
208
  </Typography>
209
  <Box component="ul" sx={{ pl: 3 }}>
210
+ <li>
211
+ <Typography variant="body1" color="text.secondary">
212
+ <strong>BCIC-2a</strong> (BCI Competition IV 2a) - 4-class MI
213
+ (left hand, right hand, feet, tongue), 9 subjects, 22 channels
214
+ </Typography>
215
+ </li>
216
  <li>
217
  <Typography variant="body1" color="text.secondary" paragraph>
218
+ <strong>PhysioNet MI</strong> - 4-class MI (left hand, right hand,
219
+ feet, both hands), 109 subjects, 64 channels
220
+ </Typography>
221
+ </li>
222
+ </Box>
223
+
224
+ <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
225
+ Sleep Staging
226
+ </Typography>
227
+ <Box component="ul" sx={{ pl: 3 }}>
228
+ <li>
229
+ <Typography variant="body1" color="text.secondary" paragraph>
230
+ <strong>ISRUC-SLEEP</strong> (Group I) - 5-class sleep staging
231
+ (W, N1, N2, N3, REM), ~100 subjects, 6 channels, 30s windows
232
+ </Typography>
233
+ </li>
234
+ </Box>
235
+
236
+ <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
237
+ Pathology Detection
238
+ </Typography>
239
+ <Box component="ul" sx={{ pl: 3 }}>
240
+ <li>
241
+ <Typography variant="body1" color="text.secondary">
242
+ <strong>TUAB</strong> (TUH Abnormal v3.0.1) - Binary (normal /
243
+ abnormal), 290+ subjects, 16 channels, 10s windows
244
  </Typography>
245
  </li>
246
  <li>
247
  <Typography variant="body1" color="text.secondary" paragraph>
248
+ <strong>TUEV</strong> (TUH Events v2.0.1) - 6-class event
249
+ classification (SPSW, GPED, PLED, EYEM, ARTF, BCKG), 200+
250
+ subjects, 21 channels
251
  </Typography>
252
  </li>
253
+ </Box>
254
+
255
+ <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
256
+ Seizure Detection
257
+ </Typography>
258
+ <Box component="ul" sx={{ pl: 3 }}>
259
  <li>
260
  <Typography variant="body1" color="text.secondary" paragraph>
261
+ <strong>CHB-MIT</strong> - Binary seizure detection, 23 pediatric
262
+ subjects, 17 channels, 10s windows
263
+ </Typography>
264
+ </li>
265
+ </Box>
266
+
267
+ <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
268
+ Emotion Recognition
269
+ </Typography>
270
+ <Box component="ul" sx={{ pl: 3 }}>
271
+ <li>
272
+ <Typography variant="body1" color="text.secondary">
273
+ <strong>FACED</strong> - 9-class discrete emotion recognition, 123
274
+ subjects, 26 channels
275
  </Typography>
276
  </li>
277
  <li>
278
  <Typography variant="body1" color="text.secondary" paragraph>
279
+ <strong>SEED-V</strong> - 5-class emotion recognition (Happy, Sad,
280
+ Neutral, Disgust, Fear), 62 channels
281
  </Typography>
282
  </li>
283
  </Box>
 
303
  target="_blank"
304
  rel="noopener noreferrer"
305
  >
306
+ Braindecode
307
+ </Link>{" "}
308
+ - Deep learning toolbox for EEG decoding
309
+ </Typography>
310
+ </li>
311
+ <li>
312
+ <Typography variant="body1" color="text.secondary" paragraph>
313
+ <Link
314
+ href="https://moabb.neurotechx.com"
315
+ target="_blank"
316
+ rel="noopener noreferrer"
317
+ >
318
+ MOABB
319
+ </Link>{" "}
320
+ - Mother of All BCI Benchmarks
321
+ </Typography>
322
+ </li>
323
+ <li>
324
+ <Typography variant="body1" color="text.secondary" paragraph>
325
+ <Link
326
+ href="https://huggingface.co/docs/peft"
327
+ target="_blank"
328
+ rel="noopener noreferrer"
329
+ >
330
+ HuggingFace PEFT
331
  </Link>{" "}
332
+ - Parameter-Efficient Fine-Tuning library
333
  </Typography>
334
  </li>
335
  <li>
frontend/src/pages/AddModelPage/components/ModelSubmissionForm/ModelSubmissionForm.js CHANGED
@@ -56,13 +56,16 @@ const HELP_TEXTS = {
56
  modelType: (
57
  <Box sx={{ p: 1 }}>
58
  <Typography variant="subtitle2" sx={{ fontWeight: 600, mb: 0.5 }}>
59
- Model Category
60
  </Typography>
61
  <Typography variant="body2" sx={{ opacity: 0.9, lineHeight: 1.4 }}>
62
- {"\u{1F7E2}"} Pretrained: Base EEG models trained with self-supervised learning{" "}
63
- {"\u{1F536}"} Fine-tuned: Models fine-tuned on specific EEG datasets{" "}
64
- {"\u{1F9EA}"} Task-specific: Models designed for specific EEG tasks{" "}
65
- {"\u{1F3D7}\u{FE0F}"} Foundation: Large-scale EEG foundation models
 
 
 
66
  </Typography>
67
  </Box>
68
  ),
@@ -117,7 +120,7 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
117
  const [formData, setFormData] = useState({
118
  modelName: "",
119
  revision: "main",
120
- modelType: "fine-tuned",
121
  precision: "float16",
122
  weightsType: "Original",
123
  baseModel: "",
@@ -298,7 +301,7 @@ function ModelSubmissionForm({ user, isAuthenticated }) {
298
  setFormData({
299
  modelName: "",
300
  revision: "main",
301
- modelType: "fine-tuned",
302
  precision: "float16",
303
  weightsType: "Original",
304
  baseModel: "",
 
56
  modelType: (
57
  <Box sx={{ p: 1 }}>
58
  <Typography variant="subtitle2" sx={{ fontWeight: 600, mb: 0.5 }}>
59
+ Adapter / Fine-tuning Method
60
  </Typography>
61
  <Typography variant="body2" sx={{ opacity: 0.9, lineHeight: 1.4 }}>
62
+ {"\u{1F9E9}"} LoRA: Low-Rank Adaptation (~98% param reduction){" "}
63
+ {"\u{1F4A1}"} IA3: Scaling vectors only (~99.5% reduction){" "}
64
+ {"\u{1F3AF}"} AdaLoRA: Adaptive rank allocation{" "}
65
+ {"\u{1F52C}"} DoRA: Weight-decomposed adaptation{" "}
66
+ {"\u{1F504}"} OFT: Orthogonal fine-tuning{" "}
67
+ {"\u{1F50D}"} Probe: Linear probing (head only){" "}
68
+ {"\u{1F527}"} Full Fine-tune: All parameters updated
69
  </Typography>
70
  </Box>
71
  ),
 
120
  const [formData, setFormData] = useState({
121
  modelName: "",
122
  revision: "main",
123
+ modelType: "lora",
124
  precision: "float16",
125
  weightsType: "Original",
126
  baseModel: "",
 
301
  setFormData({
302
  modelName: "",
303
  revision: "main",
304
+ modelType: "lora",
305
  precision: "float16",
306
  weightsType: "Original",
307
  baseModel: "",
frontend/src/pages/AddModelPage/components/SubmissionGuide/SubmissionGuide.js CHANGED
@@ -58,8 +58,9 @@ const TUTORIAL_STEPS = [
58
  <Typography variant="body2" color="text.secondary">
59
  Your model should be <strong>public</strong> on the Hub and follow the{" "}
60
  <strong>username/model-id</strong> format (e.g.
61
- braindecode/EEGNetv4). Specify the <strong>revision</strong>{" "}
62
- (commit hash or branch) and <strong>model type</strong>.
 
63
  </Typography>
64
  <DocLink href="https://huggingface.co/docs/hub/models-uploading">
65
  Model uploading guide
@@ -73,7 +74,8 @@ const TUTORIAL_STEPS = [
73
  <Stack spacing={2}>
74
  <Typography variant="body2" color="text.secondary">
75
  Make sure your model can be <strong>loaded locally</strong> before
76
- submitting:
 
77
  </Typography>
78
  <Box
79
  sx={{
@@ -92,17 +94,20 @@ const TUTORIAL_STEPS = [
92
  }}
93
  >
94
  <pre>
95
- {`import braindecode
96
- from braindecode.models import EEGNetv4
97
 
98
- # Load your model
99
- model = EEGNetv4(
100
- n_chans=22,
101
- n_outputs=4,
102
- n_times=1000,
 
 
 
103
  )
104
- # Or from Hugging Face Hub
105
- # model = braindecode.models.load("your-username/your-model", revision="main")`}
106
  </pre>
107
  </Box>
108
  <DocLink href="https://braindecode.org/stable/api.html">
@@ -131,11 +136,11 @@ model = EEGNetv4(
131
  content: (
132
  <Stack spacing={2}>
133
  <Typography variant="body2" color="text.secondary">
134
- Your model card must include: <strong>architecture</strong>,{" "}
 
135
  <strong>training details</strong>,{" "}
136
  <strong>dataset information</strong> (EEG paradigm, number of channels,
137
- sampling rate), intended use, limitations, and{" "}
138
- <strong>performance metrics</strong>.
139
  </Typography>
140
  <DocLink href="https://huggingface.co/docs/hub/model-cards">
141
  Model cards guide
@@ -151,10 +156,10 @@ model = EEGNetv4(
151
  Ensure your model is <strong>public</strong>, uses{" "}
152
  <strong>safetensors</strong> format, has a{" "}
153
  <strong>license tag</strong>, and <strong>loads correctly</strong>{" "}
154
- with the provided code.
155
  </Typography>
156
- <DocLink href="https://huggingface.co/docs/hub/repositories-getting-started">
157
- Sharing best practices
158
  </DocLink>
159
  </Stack>
160
  ),
 
58
  <Typography variant="body2" color="text.secondary">
59
  Your model should be <strong>public</strong> on the Hub and follow the{" "}
60
  <strong>username/model-id</strong> format (e.g.
61
+ braindecode/labram-lora-bcic2a). Specify the <strong>revision</strong>{" "}
62
+ (commit hash or branch) and <strong>adapter method</strong> used
63
+ for fine-tuning.
64
  </Typography>
65
  <DocLink href="https://huggingface.co/docs/hub/models-uploading">
66
  Model uploading guide
 
74
  <Stack spacing={2}>
75
  <Typography variant="body2" color="text.secondary">
76
  Make sure your model can be <strong>loaded locally</strong> before
77
+ submitting. The arena evaluates EEG foundation models with PEFT
78
+ adapters:
79
  </Typography>
80
  <Box
81
  sx={{
 
94
  }}
95
  >
96
  <pre>
97
+ {`from braindecode.models import LaBraM
98
+ from peft import get_peft_model, LoraConfig
99
 
100
+ # Load the foundation model
101
+ model = LaBraM(n_chans=22, n_outputs=4, n_times=1000)
102
+
103
+ # Apply a PEFT adapter (e.g., LoRA)
104
+ peft_config = LoraConfig(
105
+ r=16, lora_alpha=32,
106
+ target_modules=["qkv"],
107
+ lora_dropout=0.1,
108
  )
109
+ model = get_peft_model(model, peft_config)
110
+ model.print_trainable_parameters()`}
111
  </pre>
112
  </Box>
113
  <DocLink href="https://braindecode.org/stable/api.html">
 
136
  content: (
137
  <Stack spacing={2}>
138
  <Typography variant="body2" color="text.secondary">
139
+ Your model card must include: <strong>foundation model</strong> (e.g.
140
+ LaBraM, EEGPT, BIOT), <strong>adapter method</strong> (LoRA, IA3, etc.),{" "}
141
  <strong>training details</strong>,{" "}
142
  <strong>dataset information</strong> (EEG paradigm, number of channels,
143
+ sampling rate), and <strong>performance metrics</strong>.
 
144
  </Typography>
145
  <DocLink href="https://huggingface.co/docs/hub/model-cards">
146
  Model cards guide
 
156
  Ensure your model is <strong>public</strong>, uses{" "}
157
  <strong>safetensors</strong> format, has a{" "}
158
  <strong>license tag</strong>, and <strong>loads correctly</strong>{" "}
159
+ with braindecode + PEFT.
160
  </Typography>
161
+ <DocLink href="https://huggingface.co/docs/peft">
162
+ HuggingFace PEFT documentation
163
  </DocLink>
164
  </Stack>
165
  ),
frontend/src/pages/LeaderboardPage/components/Leaderboard/Leaderboard.js CHANGED
@@ -221,8 +221,7 @@ const Leaderboard = () => {
221
  const hasValidFilterCounts =
222
  state.countsReady &&
223
  state.filterCounts &&
224
- state.filterCounts.normal &&
225
- state.filterCounts.officialOnly;
226
 
227
  return {
228
  isInitialLoading,
 
221
  const hasValidFilterCounts =
222
  state.countsReady &&
223
  state.filterCounts &&
224
+ state.filterCounts.normal;
 
225
 
226
  return {
227
  isInitialLoading,
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/FilteredModelCount.js CHANGED
@@ -5,8 +5,6 @@ import { useLeaderboard } from "../../context/LeaderboardContext";
5
 
6
  const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
7
  const { state } = useLeaderboard();
8
- const isOfficialProviderActive = state.filters.isOfficialProviderActive;
9
- const { officialOnly: officialOnlyCounts } = state.filterCounts;
10
 
11
  return useMemo(() => {
12
  if (loading) {
@@ -15,12 +13,9 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
15
  currentFilteredCount: 0,
16
  totalPinnedCount: 0,
17
  filteredPinnedCount: 0,
18
- isOfficialProviderActive,
19
  };
20
  }
21
- const displayCount = isOfficialProviderActive
22
- ? officialOnlyCounts.officialProviders
23
- : totalCount;
24
 
25
  // Calculate total number of pinned models
26
  const totalPinnedCount =
@@ -33,7 +28,6 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
33
  paramsRange: state.filters.paramsRange,
34
  searchValue: state.filters.search,
35
  selectedBooleanFilters: state.filters.booleanFilters,
36
- isOfficialProviderActive: state.filters.isOfficialProviderActive,
37
  };
38
 
39
  // Check each pinned model if it would pass filters without its pinned status
@@ -41,18 +35,6 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
41
  data?.filter((model) => {
42
  if (!model.isPinned) return false;
43
 
44
- // Check each filter criteria
45
-
46
- // Filter by official providers
47
- if (filterConfig.isOfficialProviderActive) {
48
- if (
49
- !model.features?.is_official_provider &&
50
- !model.metadata?.is_official_provider
51
- ) {
52
- return false;
53
- }
54
- }
55
-
56
  // Filter by precision
57
  if (filterConfig.selectedPrecisions.length > 0) {
58
  if (
@@ -74,8 +56,8 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
74
  }
75
  }
76
 
77
- // Filter by parameters
78
- const params = model.metadata.params_billions;
79
  if (
80
  params < filterConfig.paramsRange[0] ||
81
  params >= filterConfig.paramsRange[1]
@@ -99,12 +81,6 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
99
  const filterValue =
100
  typeof filter === "object" ? filter.value : filter;
101
 
102
- // Maintainer's Highlight keeps positive logic
103
- if (filterValue === "is_official_provider") {
104
- return model.features[filterValue];
105
- }
106
-
107
- // For all other filters, invert the logic
108
  if (filterValue === "is_not_available_on_hub") {
109
  return model.features[filterValue];
110
  }
@@ -125,7 +101,6 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
125
  currentFilteredCount: filteredCount,
126
  totalPinnedCount,
127
  filteredPinnedCount,
128
- isOfficialProviderActive,
129
  };
130
  }, [
131
  loading,
@@ -133,8 +108,6 @@ const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
133
  filteredCount,
134
  data,
135
  state.filters,
136
- isOfficialProviderActive,
137
- officialOnlyCounts.officialProviders,
138
  ]);
139
  };
140
 
@@ -205,7 +178,6 @@ const FilteredModelCount = React.memo(
205
  currentFilteredCount,
206
  totalPinnedCount,
207
  filteredPinnedCount,
208
- isOfficialProviderActive,
209
  } = useModelCount({
210
  totalCount,
211
  filteredCount,
@@ -233,7 +205,7 @@ const FilteredModelCount = React.memo(
233
  <CountTypography value="/" loading={loading} />
234
  <CountTypography
235
  value={displayCount}
236
- color={isOfficialProviderActive ? "secondary.main" : "text.primary"}
237
  loading={loading}
238
  />
239
  </Box>
 
5
 
6
  const useModelCount = ({ totalCount, filteredCount, data, table, loading }) => {
7
  const { state } = useLeaderboard();
 
 
8
 
9
  return useMemo(() => {
10
  if (loading) {
 
13
  currentFilteredCount: 0,
14
  totalPinnedCount: 0,
15
  filteredPinnedCount: 0,
 
16
  };
17
  }
18
+ const displayCount = totalCount;
 
 
19
 
20
  // Calculate total number of pinned models
21
  const totalPinnedCount =
 
28
  paramsRange: state.filters.paramsRange,
29
  searchValue: state.filters.search,
30
  selectedBooleanFilters: state.filters.booleanFilters,
 
31
  };
32
 
33
  // Check each pinned model if it would pass filters without its pinned status
 
35
  data?.filter((model) => {
36
  if (!model.isPinned) return false;
37
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  // Filter by precision
39
  if (filterConfig.selectedPrecisions.length > 0) {
40
  if (
 
56
  }
57
  }
58
 
59
+ // Filter by parameters (in millions for EEG)
60
+ const params = model.metadata.params_millions;
61
  if (
62
  params < filterConfig.paramsRange[0] ||
63
  params >= filterConfig.paramsRange[1]
 
81
  const filterValue =
82
  typeof filter === "object" ? filter.value : filter;
83
 
 
 
 
 
 
 
84
  if (filterValue === "is_not_available_on_hub") {
85
  return model.features[filterValue];
86
  }
 
101
  currentFilteredCount: filteredCount,
102
  totalPinnedCount,
103
  filteredPinnedCount,
 
104
  };
105
  }, [
106
  loading,
 
108
  filteredCount,
109
  data,
110
  state.filters,
 
 
111
  ]);
112
  };
113
 
 
178
  currentFilteredCount,
179
  totalPinnedCount,
180
  filteredPinnedCount,
 
181
  } = useModelCount({
182
  totalCount,
183
  filteredCount,
 
205
  <CountTypography value="/" loading={loading} />
206
  <CountTypography
207
  value={displayCount}
208
+ color="text.primary"
209
  loading={loading}
210
  />
211
  </Box>
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/Filters.js CHANGED
@@ -31,7 +31,7 @@ import { COLUMN_TOOLTIPS } from "../../constants/tooltips";
31
 
32
  const getTooltipContent = (title) => {
33
  switch (title) {
34
- case "Model Type":
35
  return COLUMN_TOOLTIPS.ARCHITECTURE;
36
  case "Precision format":
37
  return COLUMN_TOOLTIPS.PRECISION;
@@ -64,7 +64,7 @@ const FilterGroup = ({
64
  const handleInputChange = useCallback(
65
  (index) => (event) => {
66
  const value = event.target.value === "" ? "" : Number(event.target.value);
67
- if (value === "" || (value >= -1 && value <= 140)) {
68
  const newRange = [...localParamsRange];
69
  newRange[index] = value;
70
  setLocalParamsRange(newRange);
@@ -130,7 +130,7 @@ const FilterGroup = ({
130
  type="number"
131
  inputProps={{
132
  min: -1,
133
- max: 140,
134
  style: {
135
  width: "45px",
136
  textAlign: "center",
@@ -153,7 +153,7 @@ const FilterGroup = ({
153
  type="number"
154
  inputProps={{
155
  min: -1,
156
- max: 140,
157
  style: {
158
  width: "45px",
159
  textAlign: "center",
@@ -175,15 +175,16 @@ const FilterGroup = ({
175
  onChange={handleLocalRangeChange}
176
  valueLabelDisplay="auto"
177
  min={-1}
178
- max={140}
179
  step={1}
180
  marks={[
181
  { value: -1, label: "All" },
182
- { value: 7, label: "7" },
183
- { value: 70, label: "70" },
184
- { value: 140, label: "140" },
 
185
  ]}
186
- valueLabelFormat={(value) => (value === -1 ? "All" : `${value}B`)}
187
  sx={{
188
  "& .MuiSlider-rail": {
189
  height: 10,
@@ -238,7 +239,7 @@ const FilterGroup = ({
238
  "& .MuiSlider-markLabel": {
239
  fontSize: "0.875rem",
240
  "&::after": {
241
- content: '"B"',
242
  marginLeft: "1px",
243
  opacity: 0.5,
244
  },
@@ -336,7 +337,7 @@ const LeaderboardFilters = ({
336
  onPrecisionsChange = () => {},
337
  selectedTypes = MODEL_TYPE_ORDER,
338
  onTypesChange = () => {},
339
- paramsRange = [-1, 140],
340
  onParamsRangeChange = () => {},
341
  selectedBooleanFilters = [],
342
  onBooleanFiltersChange = () => {},
@@ -348,13 +349,7 @@ const LeaderboardFilters = ({
348
  const [localParamsRange, setLocalParamsRange] = useState(paramsRange);
349
  const stableTimerRef = useRef(null);
350
  const { state, actions } = useLeaderboard();
351
- const { normal: filterCounts, officialOnly: officialOnlyCounts } =
352
- state.filterCounts;
353
- const isOfficialProviderActive = state.filters.isOfficialProviderActive;
354
- const currentCounts = useMemo(
355
- () => (isOfficialProviderActive ? officialOnlyCounts : filterCounts),
356
- [isOfficialProviderActive, officialOnlyCounts, filterCounts]
357
- );
358
 
359
  useEffect(() => {
360
  setLocalParamsRange(paramsRange);
@@ -403,17 +398,10 @@ const LeaderboardFilters = ({
403
  };
404
 
405
  // Filter options based on their hide property
406
- const showFilterOptions = BOOLEAN_FILTER_OPTIONS.filter(
407
- (option) => !option.hide
408
- );
409
  const hideFilterOptions = BOOLEAN_FILTER_OPTIONS.filter(
410
  (option) => option.hide
411
  );
412
 
413
- const handleOfficialProviderToggle = () => {
414
- actions.toggleOfficialProvider();
415
- };
416
-
417
  return loading ? null : (
418
  <Box>
419
  <Accordion
@@ -443,403 +431,246 @@ const LeaderboardFilters = ({
443
  }}
444
  >
445
  <Box>
446
- <Grid container spacing={3}>
447
- <Grid item xs={12} md={9} sx={{ display: "flex" }}>
448
- <Box
449
- sx={{
450
- backgroundColor: (theme) =>
451
- alpha(theme.palette.primary.main, 0.02),
452
- border: "1px solid",
453
- borderColor: (theme) =>
454
- alpha(theme.palette.primary.main, 0.2),
455
- borderRadius: 1,
456
- p: 3,
457
- position: "relative",
458
- width: "100%",
459
- display: "flex",
460
- flexDirection: "column",
461
- "&:hover": {
462
- borderColor: (theme) =>
463
- alpha(theme.palette.primary.main, 0.3),
464
- backgroundColor: (theme) =>
465
- alpha(theme.palette.primary.main, 0.03),
466
- },
467
- transition: (theme) =>
468
- theme.transitions.create(
469
- ["border-color", "background-color"],
470
- {
471
- duration: theme.transitions.duration.short,
472
- }
473
- ),
474
- }}
475
- >
476
- <Typography
477
- variant="h6"
478
- sx={{
479
- mb: 3,
480
- fontWeight: 600,
481
- color: "text.primary",
482
- fontSize: "1.1rem",
483
- }}
484
- >
485
- Advanced Filters
486
- </Typography>
487
- <Box sx={{ flex: 1 }}>
488
- <Grid container spacing={3} sx={{ flex: 1 }}>
489
- <Grid item xs={12} md={6}>
490
- <Box>
491
- <FilterGroup
492
- title="Precision format"
493
- tooltip={COLUMN_TOOLTIPS.PRECISION}
494
- >
495
- {FILTER_PRECISIONS.map((precision) => (
496
- <FilterTag
497
- key={precision}
498
- label={precision}
499
- checked={selectedPrecisions.includes(precision)}
500
- onChange={() =>
501
- handlePrecisionToggle(precision)
502
- }
503
- count={currentCounts.precisions[precision]}
504
- showCheckbox={true}
505
- />
506
- ))}
507
- </FilterGroup>
508
- </Box>
509
- </Grid>
510
-
511
- <Grid item xs={12} md={6}>
512
- <Box sx={{ position: "relative" }}>
513
- <FilterGroup
514
- title="Parameters"
515
- tooltip={COLUMN_TOOLTIPS.PARAMETERS}
516
- paramsRange={paramsRange}
517
- onParamsRangeChange={onParamsRangeChange}
518
- >
519
- <Box
520
- sx={{
521
- width: "100%",
522
- display: "flex",
523
- alignItems: "center",
524
- gap: 2,
525
- }}
526
- >
527
- <Box sx={{ flex: 1 }}>
528
- <Slider
529
- value={localParamsRange}
530
- onChange={handleParamsRangeChange}
531
- onChangeCommitted={
532
- handleParamsRangeChangeCommitted
533
- }
534
- valueLabelDisplay="auto"
535
- min={-1}
536
- max={140}
537
- step={1}
538
- marks={[
539
- { value: -1, label: "" },
540
- { value: 0, label: "0" },
541
- { value: 7, label: "7" },
542
- { value: 70, label: "70" },
543
- { value: 140, label: "140" },
544
- ]}
545
- sx={{
546
- "& .MuiSlider-rail": {
547
- height: 10,
548
- backgroundColor: "background.paper",
549
- border: "1px solid",
550
- borderColor: "divider",
551
- opacity: 1,
552
- },
553
- "& .MuiSlider-track": {
554
- height: 10,
555
- border: "1px solid",
556
- borderColor: (theme) =>
557
- alpha(
558
- theme.palette.primary.main,
559
- theme.palette.mode === "light"
560
- ? 0.3
561
- : 0.5
562
- ),
563
- backgroundColor: (theme) =>
564
- alpha(
565
- theme.palette.primary.main,
566
- theme.palette.mode === "light"
567
- ? 0.1
568
- : 0.2
569
- ),
570
- },
571
- "& .MuiSlider-thumb": {
572
- width: 20,
573
- height: 20,
574
- backgroundColor: "background.paper",
575
- border: "1px solid",
576
- borderColor: "primary.main",
577
- "&:hover, &.Mui-focusVisible": {
578
- boxShadow: (theme) =>
579
- `0 0 0 8px ${alpha(
580
- theme.palette.primary.main,
581
- theme.palette.mode === "light"
582
- ? 0.08
583
- : 0.16
584
- )}`,
585
- },
586
- "&.Mui-active": {
587
- boxShadow: (theme) =>
588
- `0 0 0 12px ${alpha(
589
- theme.palette.primary.main,
590
- theme.palette.mode === "light"
591
- ? 0.08
592
- : 0.16
593
- )}`,
594
- },
595
- },
596
- "& .MuiSlider-mark": {
597
- backgroundColor: "text.disabled",
598
- height: 2,
599
- width: 2,
600
- borderRadius: "50%",
601
- },
602
- "& .MuiSlider-markLabel": {
603
- color: "text.secondary",
604
- },
605
- }}
606
- />
607
- </Box>
608
- </Box>
609
- </FilterGroup>
610
- </Box>
611
- </Grid>
612
-
613
- {/* Deuxième ligne */}
614
- <Grid item xs={12} md={6}>
615
- <Box>
616
- <FilterGroup
617
- title="Model Type"
618
- tooltip={COLUMN_TOOLTIPS.ARCHITECTURE}
619
- >
620
- {MODEL_TYPE_ORDER.sort(
621
- (a, b) =>
622
- MODEL_TYPES[a].order - MODEL_TYPES[b].order
623
- ).map((type) => (
624
- <FilterTag
625
- key={type}
626
- label={`${MODEL_TYPES[type]?.icon} ${
627
- MODEL_TYPES[type]?.label || type
628
- }`}
629
- checked={selectedTypes.includes(type)}
630
- onChange={() => {
631
- const newTypes = selectedTypes.includes(type)
632
- ? selectedTypes.filter((t) => t !== type)
633
- : [...selectedTypes, type];
634
- onTypesChange(newTypes);
635
- }}
636
- count={currentCounts.modelTypes[type]}
637
- variant="tag"
638
- showCheckbox={true}
639
- />
640
- ))}
641
- </FilterGroup>
642
- </Box>
643
- </Grid>
644
-
645
- <Grid item xs={12} md={6}>
646
- <Box>
647
- <FilterGroup
648
- title="Flags"
649
- tooltip={COLUMN_TOOLTIPS.FLAGS}
650
- >
651
- {hideFilterOptions.map((filter) => (
652
- <FilterTag
653
- key={filter.value}
654
- label={filter.label}
655
- checked={
656
- !selectedBooleanFilters.includes(filter.value)
657
- }
658
- onChange={() => {
659
- const newFilters =
660
- selectedBooleanFilters.includes(
661
- filter.value
662
- )
663
- ? selectedBooleanFilters.filter(
664
- (f) => f !== filter.value
665
- )
666
- : [
667
- ...selectedBooleanFilters,
668
- filter.value,
669
- ];
670
- onBooleanFiltersChange(newFilters);
671
- }}
672
- count={
673
- filter.value === "is_moe"
674
- ? currentCounts.mixtureOfExperts
675
- : filter.value === "is_flagged"
676
- ? currentCounts.flagged
677
- : filter.value === "is_merged"
678
- ? currentCounts.merged
679
- : filter.value === "is_not_available_on_hub"
680
- ? currentCounts.notOnHub
681
- : 0
682
- }
683
- isHideFilter={false}
684
- totalCount={data.length}
685
- showCheckbox={true}
686
- />
687
- ))}
688
- </FilterGroup>
689
- </Box>
690
- </Grid>
691
- </Grid>
692
- </Box>
693
- </Box>
694
- </Grid>
695
-
696
- <Grid item xs={12} md={3} sx={{ display: "flex" }}>
697
- <Box
698
- sx={{
699
- backgroundColor: (theme) =>
700
- alpha(theme.palette.secondary.main, 0.02),
701
- border: "1px solid",
702
- borderColor: (theme) =>
703
- alpha(theme.palette.secondary.main, 0.15),
704
- borderRadius: 1,
705
- p: 3,
706
- position: "relative",
707
- width: "100%",
708
- display: "flex",
709
- flexDirection: "column",
710
- alignItems: "center",
711
- justifyContent: "center",
712
- textAlign: "center",
713
- minHeight: "100%",
714
- "&:hover": {
715
- borderColor: (theme) =>
716
- alpha(theme.palette.secondary.main, 0.25),
717
- backgroundColor: (theme) =>
718
- alpha(theme.palette.secondary.main, 0.03),
719
- },
720
- transition: (theme) =>
721
- theme.transitions.create(
722
- ["border-color", "background-color"],
723
- {
724
- duration: theme.transitions.duration.short,
725
- }
726
- ),
727
- }}
728
- >
729
- <Box
730
- sx={{
731
- display: "flex",
732
- flexDirection: "column",
733
- alignItems: "center",
734
- gap: 2,
735
- }}
736
- >
737
- <Typography
738
- variant="h6"
739
- sx={{
740
- fontWeight: 600,
741
- color: "text.primary",
742
- fontSize: "1.1rem",
743
- display: "flex",
744
- alignItems: "center",
745
- gap: 1,
746
- }}
747
- >
748
- Official Models
749
- </Typography>
750
- <Typography
751
- variant="body2"
752
- sx={{
753
- color: "text.secondary",
754
- fontSize: "0.8rem",
755
- lineHeight: 1.4,
756
- maxWidth: "280px",
757
- }}
758
- >
759
- Show only models that are officially provided and
760
- maintained by their original creators.
761
- </Typography>
762
- <Box
763
- sx={{
764
- display: "flex",
765
- flexDirection: "column",
766
- gap: 1,
767
- width: "100%",
768
- alignItems: "center",
769
- }}
770
- >
771
- {showFilterOptions.map((filter) => (
772
  <Box
773
- key={filter.value}
774
  sx={{
 
775
  display: "flex",
776
- flexDirection: "column",
777
  alignItems: "center",
778
- gap: 1,
779
  }}
780
  >
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
781
  <FilterTag
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
782
  label={filter.label}
783
  checked={
784
- filter.value === "is_official_provider"
785
- ? isOfficialProviderActive
786
- : selectedBooleanFilters.includes(filter.value)
787
- }
788
- onChange={
789
- filter.value === "is_official_provider"
790
- ? handleOfficialProviderToggle
791
- : () => handleBooleanFilterToggle(filter.value)
792
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
793
  count={
794
- filter.value === "is_official_provider"
795
- ? currentCounts.officialProviders
796
  : 0
797
  }
 
 
798
  showCheckbox={true}
799
- variant="secondary"
800
  />
801
- <Box
802
- sx={{
803
- display: "flex",
804
- alignItems: "center",
805
- gap: 0.5,
806
- color: "text.secondary",
807
- fontSize: "0.75rem",
808
- }}
809
- >
810
- <Box
811
- component="span"
812
- sx={{
813
- width: 6,
814
- height: 6,
815
- borderRadius: "50%",
816
- backgroundColor: (
817
- filter.value ===
818
- "is_official_provider"
819
- ? isOfficialProviderActive
820
- : selectedBooleanFilters.includes(
821
- filter.value
822
- )
823
- )
824
- ? "success.main"
825
- : "text.disabled",
826
- }}
827
- />
828
- {(
829
- filter.value === "is_official_provider"
830
- ? isOfficialProviderActive
831
- : selectedBooleanFilters.includes(filter.value)
832
- )
833
- ? "Filter active"
834
- : "Filter inactive"}
835
- </Box>
836
- </Box>
837
- ))}
838
  </Box>
839
- </Box>
840
- </Box>
841
- </Grid>
842
- </Grid>
843
  </Box>
844
  </AccordionDetails>
845
  </Accordion>
 
31
 
32
  const getTooltipContent = (title) => {
33
  switch (title) {
34
+ case "Adapter Method":
35
  return COLUMN_TOOLTIPS.ARCHITECTURE;
36
  case "Precision format":
37
  return COLUMN_TOOLTIPS.PRECISION;
 
64
  const handleInputChange = useCallback(
65
  (index) => (event) => {
66
  const value = event.target.value === "" ? "" : Number(event.target.value);
67
+ if (value === "" || (value >= -1 && value <= 500)) {
68
  const newRange = [...localParamsRange];
69
  newRange[index] = value;
70
  setLocalParamsRange(newRange);
 
130
  type="number"
131
  inputProps={{
132
  min: -1,
133
+ max: 500,
134
  style: {
135
  width: "45px",
136
  textAlign: "center",
 
153
  type="number"
154
  inputProps={{
155
  min: -1,
156
+ max: 500,
157
  style: {
158
  width: "45px",
159
  textAlign: "center",
 
175
  onChange={handleLocalRangeChange}
176
  valueLabelDisplay="auto"
177
  min={-1}
178
+ max={500}
179
  step={1}
180
  marks={[
181
  { value: -1, label: "All" },
182
+ { value: 10, label: "10" },
183
+ { value: 50, label: "50" },
184
+ { value: 200, label: "200" },
185
+ { value: 500, label: "500" },
186
  ]}
187
+ valueLabelFormat={(value) => (value === -1 ? "All" : `${value}M`)}
188
  sx={{
189
  "& .MuiSlider-rail": {
190
  height: 10,
 
239
  "& .MuiSlider-markLabel": {
240
  fontSize: "0.875rem",
241
  "&::after": {
242
+ content: '"M"',
243
  marginLeft: "1px",
244
  opacity: 0.5,
245
  },
 
337
  onPrecisionsChange = () => {},
338
  selectedTypes = MODEL_TYPE_ORDER,
339
  onTypesChange = () => {},
340
+ paramsRange = [-1, 500],
341
  onParamsRangeChange = () => {},
342
  selectedBooleanFilters = [],
343
  onBooleanFiltersChange = () => {},
 
349
  const [localParamsRange, setLocalParamsRange] = useState(paramsRange);
350
  const stableTimerRef = useRef(null);
351
  const { state, actions } = useLeaderboard();
352
+ const { normal: filterCounts } = state.filterCounts;
 
 
 
 
 
 
353
 
354
  useEffect(() => {
355
  setLocalParamsRange(paramsRange);
 
398
  };
399
 
400
  // Filter options based on their hide property
 
 
 
401
  const hideFilterOptions = BOOLEAN_FILTER_OPTIONS.filter(
402
  (option) => option.hide
403
  );
404
 
 
 
 
 
405
  return loading ? null : (
406
  <Box>
407
  <Accordion
 
431
  }}
432
  >
433
  <Box>
434
+ <Box
435
+ sx={{
436
+ backgroundColor: (theme) =>
437
+ alpha(theme.palette.primary.main, 0.02),
438
+ border: "1px solid",
439
+ borderColor: (theme) =>
440
+ alpha(theme.palette.primary.main, 0.2),
441
+ borderRadius: 1,
442
+ p: 3,
443
+ position: "relative",
444
+ width: "100%",
445
+ display: "flex",
446
+ flexDirection: "column",
447
+ "&:hover": {
448
+ borderColor: (theme) =>
449
+ alpha(theme.palette.primary.main, 0.3),
450
+ backgroundColor: (theme) =>
451
+ alpha(theme.palette.primary.main, 0.03),
452
+ },
453
+ transition: (theme) =>
454
+ theme.transitions.create(
455
+ ["border-color", "background-color"],
456
+ {
457
+ duration: theme.transitions.duration.short,
458
+ }
459
+ ),
460
+ }}
461
+ >
462
+ <Typography
463
+ variant="h6"
464
+ sx={{
465
+ mb: 3,
466
+ fontWeight: 600,
467
+ color: "text.primary",
468
+ fontSize: "1.1rem",
469
+ }}
470
+ >
471
+ Advanced Filters
472
+ </Typography>
473
+ <Box sx={{ flex: 1 }}>
474
+ <Grid container spacing={3} sx={{ flex: 1 }}>
475
+ <Grid item xs={12} md={6}>
476
+ <Box>
477
+ <FilterGroup
478
+ title="Precision format"
479
+ tooltip={COLUMN_TOOLTIPS.PRECISION}
480
+ >
481
+ {FILTER_PRECISIONS.map((precision) => (
482
+ <FilterTag
483
+ key={precision}
484
+ label={precision}
485
+ checked={selectedPrecisions.includes(precision)}
486
+ onChange={() =>
487
+ handlePrecisionToggle(precision)
488
+ }
489
+ count={filterCounts.precisions[precision]}
490
+ showCheckbox={true}
491
+ />
492
+ ))}
493
+ </FilterGroup>
494
+ </Box>
495
+ </Grid>
496
+
497
+ <Grid item xs={12} md={6}>
498
+ <Box sx={{ position: "relative" }}>
499
+ <FilterGroup
500
+ title="Parameters"
501
+ tooltip={COLUMN_TOOLTIPS.PARAMETERS}
502
+ paramsRange={paramsRange}
503
+ onParamsRangeChange={onParamsRangeChange}
504
+ >
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505
  <Box
 
506
  sx={{
507
+ width: "100%",
508
  display: "flex",
 
509
  alignItems: "center",
510
+ gap: 2,
511
  }}
512
  >
513
+ <Box sx={{ flex: 1 }}>
514
+ <Slider
515
+ value={localParamsRange}
516
+ onChange={handleParamsRangeChange}
517
+ onChangeCommitted={
518
+ handleParamsRangeChangeCommitted
519
+ }
520
+ valueLabelDisplay="auto"
521
+ min={-1}
522
+ max={500}
523
+ step={1}
524
+ marks={[
525
+ { value: -1, label: "" },
526
+ { value: 0, label: "0" },
527
+ { value: 10, label: "10" },
528
+ { value: 50, label: "50" },
529
+ { value: 200, label: "200" },
530
+ { value: 500, label: "500" },
531
+ ]}
532
+ sx={{
533
+ "& .MuiSlider-rail": {
534
+ height: 10,
535
+ backgroundColor: "background.paper",
536
+ border: "1px solid",
537
+ borderColor: "divider",
538
+ opacity: 1,
539
+ },
540
+ "& .MuiSlider-track": {
541
+ height: 10,
542
+ border: "1px solid",
543
+ borderColor: (theme) =>
544
+ alpha(
545
+ theme.palette.primary.main,
546
+ theme.palette.mode === "light"
547
+ ? 0.3
548
+ : 0.5
549
+ ),
550
+ backgroundColor: (theme) =>
551
+ alpha(
552
+ theme.palette.primary.main,
553
+ theme.palette.mode === "light"
554
+ ? 0.1
555
+ : 0.2
556
+ ),
557
+ },
558
+ "& .MuiSlider-thumb": {
559
+ width: 20,
560
+ height: 20,
561
+ backgroundColor: "background.paper",
562
+ border: "1px solid",
563
+ borderColor: "primary.main",
564
+ "&:hover, &.Mui-focusVisible": {
565
+ boxShadow: (theme) =>
566
+ `0 0 0 8px ${alpha(
567
+ theme.palette.primary.main,
568
+ theme.palette.mode === "light"
569
+ ? 0.08
570
+ : 0.16
571
+ )}`,
572
+ },
573
+ "&.Mui-active": {
574
+ boxShadow: (theme) =>
575
+ `0 0 0 12px ${alpha(
576
+ theme.palette.primary.main,
577
+ theme.palette.mode === "light"
578
+ ? 0.08
579
+ : 0.16
580
+ )}`,
581
+ },
582
+ },
583
+ "& .MuiSlider-mark": {
584
+ backgroundColor: "text.disabled",
585
+ height: 2,
586
+ width: 2,
587
+ borderRadius: "50%",
588
+ },
589
+ "& .MuiSlider-markLabel": {
590
+ color: "text.secondary",
591
+ },
592
+ }}
593
+ />
594
+ </Box>
595
+ </Box>
596
+ </FilterGroup>
597
+ </Box>
598
+ </Grid>
599
+
600
+ <Grid item xs={12} md={6}>
601
+ <Box>
602
+ <FilterGroup
603
+ title="Adapter Method"
604
+ tooltip={COLUMN_TOOLTIPS.ARCHITECTURE}
605
+ >
606
+ {MODEL_TYPE_ORDER.sort(
607
+ (a, b) =>
608
+ MODEL_TYPES[a].order - MODEL_TYPES[b].order
609
+ ).map((type) => (
610
  <FilterTag
611
+ key={type}
612
+ label={`${MODEL_TYPES[type]?.icon} ${
613
+ MODEL_TYPES[type]?.label || type
614
+ }`}
615
+ checked={selectedTypes.includes(type)}
616
+ onChange={() => {
617
+ const newTypes = selectedTypes.includes(type)
618
+ ? selectedTypes.filter((t) => t !== type)
619
+ : [...selectedTypes, type];
620
+ onTypesChange(newTypes);
621
+ }}
622
+ count={filterCounts.modelTypes[type]}
623
+ variant="tag"
624
+ showCheckbox={true}
625
+ />
626
+ ))}
627
+ </FilterGroup>
628
+ </Box>
629
+ </Grid>
630
+
631
+ <Grid item xs={12} md={6}>
632
+ <Box>
633
+ <FilterGroup
634
+ title="Flags"
635
+ tooltip={COLUMN_TOOLTIPS.FLAGS}
636
+ >
637
+ {hideFilterOptions.map((filter) => (
638
+ <FilterTag
639
+ key={filter.value}
640
  label={filter.label}
641
  checked={
642
+ !selectedBooleanFilters.includes(filter.value)
 
 
 
 
 
 
 
643
  }
644
+ onChange={() => {
645
+ const newFilters =
646
+ selectedBooleanFilters.includes(
647
+ filter.value
648
+ )
649
+ ? selectedBooleanFilters.filter(
650
+ (f) => f !== filter.value
651
+ )
652
+ : [
653
+ ...selectedBooleanFilters,
654
+ filter.value,
655
+ ];
656
+ onBooleanFiltersChange(newFilters);
657
+ }}
658
  count={
659
+ filter.value === "is_not_available_on_hub"
660
+ ? filterCounts.notOnHub
661
  : 0
662
  }
663
+ isHideFilter={false}
664
+ totalCount={data.length}
665
  showCheckbox={true}
 
666
  />
667
+ ))}
668
+ </FilterGroup>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
669
  </Box>
670
+ </Grid>
671
+ </Grid>
672
+ </Box>
673
+ </Box>
674
  </Box>
675
  </AccordionDetails>
676
  </Accordion>
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/QuickFilters.js CHANGED
@@ -65,41 +65,17 @@ export const QuickFiltersSkeleton = () => (
65
  }}
66
  />
67
  ))}
68
- <Skeleton
69
- height={32}
70
- sx={{
71
- width: { xs: "100%", md: 150 },
72
- borderRadius: 1,
73
- ml: 2,
74
- }}
75
- />
76
  </Box>
77
  </Box>
78
  );
79
 
80
  const QuickFilters = ({ totalCount = 0, loading = false }) => {
81
  const { state, actions } = useLeaderboard();
82
- const { normal: filterCounts, officialOnly: officialOnlyCounts } =
83
- state.filterCounts;
84
- const isOfficialProviderActive = state.filters.isOfficialProviderActive;
85
  const currentParams = state.filters.paramsRange;
86
 
87
- const currentCounts = useMemo(
88
- () => (isOfficialProviderActive ? officialOnlyCounts : filterCounts),
89
- [isOfficialProviderActive, officialOnlyCounts, filterCounts]
90
- );
91
-
92
  const modelSizePresets = useMemo(
93
- () =>
94
- QUICK_FILTER_PRESETS.filter(
95
- (preset) => preset.id !== "official_providers"
96
- ),
97
- []
98
- );
99
-
100
- const officialProvidersPreset = useMemo(
101
- () =>
102
- QUICK_FILTER_PRESETS.find((preset) => preset.id === "official_providers"),
103
  []
104
  );
105
 
@@ -110,7 +86,7 @@ const QuickFilters = ({ totalCount = 0, loading = false }) => {
110
  currentParams[1] === preset.filters.paramsRange[1];
111
 
112
  if (isActive) {
113
- actions.setFilter("paramsRange", [-1, 140]); // Reset to default
114
  } else {
115
  actions.setFilter("paramsRange", preset.filters.paramsRange);
116
  }
@@ -121,15 +97,11 @@ const QuickFilters = ({ totalCount = 0, loading = false }) => {
121
  const getPresetCount = useCallback(
122
  (preset) => {
123
  const range = preset.id.split("_")[0];
124
- return currentCounts.parameterRanges[range] || 0;
125
  },
126
- [currentCounts]
127
  );
128
 
129
- const handleOfficialProviderToggle = useCallback(() => {
130
- actions.toggleOfficialProvider();
131
- }, [actions]);
132
-
133
  if (loading) {
134
  return <QuickFiltersSkeleton />;
135
  }
@@ -194,28 +166,6 @@ const QuickFilters = ({ totalCount = 0, loading = false }) => {
194
  />
195
  ))}
196
  </Box>
197
-
198
- <Box
199
- sx={{
200
- width: { xs: "100%", md: "100%", lg: "auto" },
201
- display: "flex",
202
- }}
203
- >
204
- {officialProvidersPreset && (
205
- <FilterTag
206
- label={officialProvidersPreset.label}
207
- checked={isOfficialProviderActive}
208
- onChange={handleOfficialProviderToggle}
209
- count={currentCounts.officialProviders}
210
- totalCount={totalCount}
211
- showCheckbox={true}
212
- variant="secondary"
213
- sx={{
214
- width: { xs: "100%", md: "100%", lg: "auto" },
215
- }}
216
- />
217
- )}
218
- </Box>
219
  </Box>
220
  </Box>
221
  );
 
65
  }}
66
  />
67
  ))}
 
 
 
 
 
 
 
 
68
  </Box>
69
  </Box>
70
  );
71
 
72
  const QuickFilters = ({ totalCount = 0, loading = false }) => {
73
  const { state, actions } = useLeaderboard();
74
+ const { normal: filterCounts } = state.filterCounts;
 
 
75
  const currentParams = state.filters.paramsRange;
76
 
 
 
 
 
 
77
  const modelSizePresets = useMemo(
78
+ () => QUICK_FILTER_PRESETS,
 
 
 
 
 
 
 
 
 
79
  []
80
  );
81
 
 
86
  currentParams[1] === preset.filters.paramsRange[1];
87
 
88
  if (isActive) {
89
+ actions.setFilter("paramsRange", [-1, 500]); // Reset to default
90
  } else {
91
  actions.setFilter("paramsRange", preset.filters.paramsRange);
92
  }
 
97
  const getPresetCount = useCallback(
98
  (preset) => {
99
  const range = preset.id.split("_")[0];
100
+ return filterCounts.parameterRanges[range] || 0;
101
  },
102
+ [filterCounts]
103
  );
104
 
 
 
 
 
105
  if (loading) {
106
  return <QuickFiltersSkeleton />;
107
  }
 
166
  />
167
  ))}
168
  </Box>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  </Box>
170
  </Box>
171
  );
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/SearchBar.js CHANGED
@@ -148,8 +148,7 @@ const SearchBar = ({
148
  state.filters.precisions.length !== FILTER_PRECISIONS.length ||
149
  state.filters.types.length !== MODEL_TYPE_ORDER.length ||
150
  state.filters.paramsRange[0] !== -1 ||
151
- state.filters.paramsRange[1] !== 140 ||
152
- state.filters.isOfficialProviderActive;
153
 
154
  const shouldShowReset = localValue || hasActiveFilters;
155
 
@@ -185,7 +184,7 @@ const SearchBar = ({
185
  <InputBase
186
  value={localValue}
187
  onChange={handleLocalChange}
188
- placeholder='Search by model name • try "meta @architecture:llama @license:mit"'
189
  sx={{
190
  flex: 1,
191
  fontSize: "1rem",
@@ -207,8 +206,6 @@ const SearchBar = ({
207
  filteredCount={data.length}
208
  hasFilterChanges={hasActiveFilters}
209
  loading={loading}
210
- isOfficialProviderActive={state.filters.isOfficialProviderActive}
211
- officialProvidersCount={state.filters.officialProvidersCount}
212
  size="large"
213
  data={data}
214
  table={table}
 
148
  state.filters.precisions.length !== FILTER_PRECISIONS.length ||
149
  state.filters.types.length !== MODEL_TYPE_ORDER.length ||
150
  state.filters.paramsRange[0] !== -1 ||
151
+ state.filters.paramsRange[1] !== 500;
 
152
 
153
  const shouldShowReset = localValue || hasActiveFilters;
154
 
 
184
  <InputBase
185
  value={localValue}
186
  onChange={handleLocalChange}
187
+ placeholder='Search by model name • try "labram @type:lora @architecture:labram"'
188
  sx={{
189
  flex: 1,
190
  fontSize: "1rem",
 
206
  filteredCount={data.length}
207
  hasFilterChanges={hasActiveFilters}
208
  loading={loading}
 
 
209
  size="large"
210
  data={data}
211
  table={table}
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Filters/hooks/useOfficialProvidersMode.js CHANGED
@@ -1,130 +1,9 @@
1
- import { useCallback, useState, useEffect, useRef } from "react";
2
- import { useSearchParams } from "react-router-dom";
3
-
4
- const useRouterSearchParams = () => {
5
- try {
6
- return useSearchParams();
7
- } catch {
8
- return [null, () => {}];
9
- }
10
- };
11
 
12
  export const useOfficialProvidersMode = () => {
13
- const [isOfficialProviderActive, setIsOfficialProviderActive] =
14
- useState(false);
15
- const [searchParams, setSearchParams] = useRouterSearchParams();
16
- const normalFiltersRef = useRef(null);
17
- const isInitialLoadRef = useRef(true);
18
- const lastToggleSourceRef = useRef(null);
19
-
20
- // Effect to handle initial state and updates
21
- useEffect(() => {
22
- if (!searchParams) return;
23
-
24
- const filters = searchParams.get("filters");
25
- const isHighlighted =
26
- filters?.includes("is_official_provider") || false;
27
-
28
- // On initial load
29
- if (isInitialLoadRef.current) {
30
- isInitialLoadRef.current = false;
31
-
32
- // If official mode is active at start, store filters without the highlightFilter
33
- if (isHighlighted && filters) {
34
- const initialNormalFilters = filters
35
- .split(",")
36
- .filter((f) => f !== "is_official_provider" && f !== "")
37
- .filter(Boolean);
38
- if (initialNormalFilters.length > 0) {
39
- normalFiltersRef.current = initialNormalFilters.join(",");
40
- }
41
- }
42
-
43
- // Update state without triggering URL change
44
- setIsOfficialProviderActive(isHighlighted);
45
- return;
46
- }
47
-
48
- // For subsequent changes
49
- if (!isHighlighted && filters) {
50
- normalFiltersRef.current = filters;
51
- }
52
-
53
- setIsOfficialProviderActive(isHighlighted);
54
- }, [searchParams]);
55
-
56
- const toggleOfficialProviderMode = useCallback(
57
- (source = null) => {
58
- if (!searchParams || !setSearchParams) return;
59
-
60
- // If source is the same as last time and last change was less than 100ms ago, ignore
61
- const now = Date.now();
62
- if (
63
- source &&
64
- source === lastToggleSourceRef.current?.source &&
65
- now - (lastToggleSourceRef.current?.timestamp || 0) < 100
66
- ) {
67
- return;
68
- }
69
-
70
- const currentFiltersStr = searchParams.get("filters");
71
- const currentFilters =
72
- currentFiltersStr?.split(",").filter(Boolean) || [];
73
- const highlightFilter = "is_official_provider";
74
- const newSearchParams = new URLSearchParams(searchParams);
75
-
76
- if (currentFilters.includes(highlightFilter)) {
77
- // Deactivating official provider mode
78
- if (normalFiltersRef.current) {
79
- const normalFilters = normalFiltersRef.current
80
- .split(",")
81
- .filter((f) => f !== highlightFilter && f !== "")
82
- .filter(Boolean);
83
-
84
- if (normalFilters.length > 0) {
85
- newSearchParams.set("filters", normalFilters.join(","));
86
- } else {
87
- newSearchParams.delete("filters");
88
- }
89
- } else {
90
- const newFilters = currentFilters.filter(
91
- (f) => f !== highlightFilter && f !== ""
92
- );
93
- if (newFilters.length === 0) {
94
- newSearchParams.delete("filters");
95
- } else {
96
- newSearchParams.set("filters", newFilters.join(","));
97
- }
98
- }
99
- } else {
100
- // Activating official provider mode
101
- if (currentFiltersStr) {
102
- normalFiltersRef.current = currentFiltersStr;
103
- }
104
-
105
- const filtersToSet = [
106
- ...new Set([...currentFilters, highlightFilter]),
107
- ].filter(Boolean);
108
- newSearchParams.set("filters", filtersToSet.join(","));
109
- }
110
-
111
- // Update state immediately
112
- setIsOfficialProviderActive(!currentFilters.includes(highlightFilter));
113
-
114
- // Save source and timestamp of last change
115
- lastToggleSourceRef.current = {
116
- source,
117
- timestamp: now,
118
- };
119
-
120
- // Update search params and let HashRouter handle the URL
121
- setSearchParams(newSearchParams);
122
- },
123
- [searchParams, setSearchParams]
124
- );
125
-
126
  return {
127
- isOfficialProviderActive,
128
- toggleOfficialProviderMode,
129
  };
130
  };
 
1
+ // Official providers mode is not used in the EEG Finetune Arena.
2
+ // This hook is kept as a no-op for compatibility with any remaining references.
 
 
 
 
 
 
 
 
3
 
4
  export const useOfficialProvidersMode = () => {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  return {
6
+ isOfficialProviderActive: false,
7
+ toggleOfficialProviderMode: () => {},
8
  };
9
  };
frontend/src/pages/LeaderboardPage/components/Leaderboard/components/Table/hooks/useDataProcessing.js CHANGED
@@ -28,8 +28,7 @@ export const useDataProcessing = (
28
  scoreDisplay,
29
  pinnedModels,
30
  onTogglePin,
31
- setSorting,
32
- isOfficialProviderActive
33
  ) => {
34
  // Call hooks directly at root level
35
  const { minAverage, maxAverage } = useAverageRange(data);
@@ -47,7 +46,6 @@ export const useDataProcessing = (
47
  selectedBooleanFilters,
48
  rankingMode,
49
  pinnedModels,
50
- isOfficialProviderActive,
51
  }),
52
  [
53
  selectedPrecisions,
@@ -57,7 +55,6 @@ export const useDataProcessing = (
57
  selectedBooleanFilters,
58
  rankingMode,
59
  pinnedModels,
60
- isOfficialProviderActive,
61
  ]
62
  );
63
 
@@ -70,8 +67,7 @@ export const useDataProcessing = (
70
  filterConfig.searchValue,
71
  filterConfig.selectedBooleanFilters,
72
  filterConfig.rankingMode,
73
- filterConfig.pinnedModels,
74
- filterConfig.isOfficialProviderActive
75
  );
76
 
77
  // Memoize columns creation
 
28
  scoreDisplay,
29
  pinnedModels,
30
  onTogglePin,
31
+ setSorting
 
32
  ) => {
33
  // Call hooks directly at root level
34
  const { minAverage, maxAverage } = useAverageRange(data);
 
46
  selectedBooleanFilters,
47
  rankingMode,
48
  pinnedModels,
 
49
  }),
50
  [
51
  selectedPrecisions,
 
55
  selectedBooleanFilters,
56
  rankingMode,
57
  pinnedModels,
 
58
  ]
59
  );
60
 
 
67
  filterConfig.searchValue,
68
  filterConfig.selectedBooleanFilters,
69
  filterConfig.rankingMode,
70
+ filterConfig.pinnedModels
 
71
  );
72
 
73
  // Memoize columns creation
frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/defaults.js CHANGED
@@ -60,12 +60,14 @@ const COLUMN_SIZES = {
60
  LICENSE: 160,
61
  UPLOAD_DATE: 160,
62
  SUBMISSION_DATE: 200,
63
- GENERATION: 160,
64
  BASE_MODEL: 390,
65
  HUB_AVAILABILITY: 180,
 
 
66
  };
67
 
68
- // Column definitions with organized structure
 
69
  const COLUMNS = {
70
  FIXED: {
71
  rank: {
@@ -78,7 +80,7 @@ const COLUMNS = {
78
  group: "fixed",
79
  size: COLUMN_SIZES.TYPE_ICON,
80
  defaultVisible: true,
81
- label: "Type",
82
  },
83
  id: {
84
  group: "fixed",
@@ -94,17 +96,58 @@ const COLUMNS = {
94
  },
95
  },
96
  EVALUATION: {
97
- "evaluations.anli.normalized_score": {
 
98
  group: "evaluation",
99
  size: COLUMN_SIZES.BENCHMARK,
100
  defaultVisible: true,
101
- label: "ANLI",
102
  },
103
- "evaluations.logiqa.normalized_score": {
104
  group: "evaluation",
105
  size: COLUMN_SIZES.BENCHMARK,
106
  defaultVisible: true,
107
- label: "LogiQA",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  },
109
  },
110
  MODEL_INFO: {
@@ -117,8 +160,14 @@ const COLUMNS = {
117
  "model.architecture": {
118
  group: "model_info",
119
  size: COLUMN_SIZES.ARCHITECTURE,
 
 
 
 
 
 
120
  defaultVisible: false,
121
- label: "Architecture",
122
  },
123
  "model.precision": {
124
  group: "model_info",
@@ -132,6 +181,12 @@ const COLUMNS = {
132
  defaultVisible: false,
133
  label: "Parameters (M)",
134
  },
 
 
 
 
 
 
135
  "metadata.hub_license": {
136
  group: "model_info",
137
  size: COLUMN_SIZES.LICENSE,
@@ -152,12 +207,6 @@ const COLUMNS = {
152
  defaultVisible: false,
153
  label: "Submission Date",
154
  },
155
- "metadata.generation": {
156
- group: "additional_info",
157
- size: COLUMN_SIZES.GENERATION,
158
- defaultVisible: false,
159
- label: "Generation",
160
- },
161
  "metadata.base_model": {
162
  group: "additional_info",
163
  size: COLUMN_SIZES.BASE_MODEL,
@@ -281,9 +330,13 @@ export const HIGHLIGHT_COLORS = [
281
  export const SKELETON_COLUMNS = [
282
  40, // Checkbox
283
  COLUMN_SIZES.RANK, // Rank
284
- COLUMN_SIZES.TYPE_ICON, // Type icon
285
  COLUMN_SIZES.MODEL, // Model name
286
  COLUMN_SIZES.AVERAGE_SCORE, // Average score
287
- COLUMN_SIZES.BENCHMARK, // Benchmark 1 (ANLI)
288
- COLUMN_SIZES.BENCHMARK, // Benchmark 2 (LogiQA)
 
 
 
 
289
  ];
 
60
  LICENSE: 160,
61
  UPLOAD_DATE: 160,
62
  SUBMISSION_DATE: 200,
 
63
  BASE_MODEL: 390,
64
  HUB_AVAILABILITY: 180,
65
+ ADAPTER_METHOD: 160,
66
+ EMBED_DIM: 140,
67
  };
68
 
69
+ // Column definitions evaluation columns map to the 8 EEG downstream
70
+ // datasets from the PEFT benchmark for EEG Foundation Models.
71
  const COLUMNS = {
72
  FIXED: {
73
  rank: {
 
80
  group: "fixed",
81
  size: COLUMN_SIZES.TYPE_ICON,
82
  defaultVisible: true,
83
+ label: "Adapter",
84
  },
85
  id: {
86
  group: "fixed",
 
96
  },
97
  },
98
  EVALUATION: {
99
+ // Motor Imagery
100
+ "evaluations.bcic2a.normalized_score": {
101
  group: "evaluation",
102
  size: COLUMN_SIZES.BENCHMARK,
103
  defaultVisible: true,
104
+ label: "BCIC-2a",
105
  },
106
+ "evaluations.physionet.normalized_score": {
107
  group: "evaluation",
108
  size: COLUMN_SIZES.BENCHMARK,
109
  defaultVisible: true,
110
+ label: "PhysioNet",
111
+ },
112
+ // Sleep Staging
113
+ "evaluations.isruc_sleep.normalized_score": {
114
+ group: "evaluation",
115
+ size: COLUMN_SIZES.BENCHMARK,
116
+ defaultVisible: true,
117
+ label: "ISRUC",
118
+ },
119
+ // Pathology Detection
120
+ "evaluations.tuab.normalized_score": {
121
+ group: "evaluation",
122
+ size: COLUMN_SIZES.BENCHMARK,
123
+ defaultVisible: true,
124
+ label: "TUAB",
125
+ },
126
+ "evaluations.tuev.normalized_score": {
127
+ group: "evaluation",
128
+ size: COLUMN_SIZES.BENCHMARK,
129
+ defaultVisible: true,
130
+ label: "TUEV",
131
+ },
132
+ // Seizure Detection
133
+ "evaluations.chbmit.normalized_score": {
134
+ group: "evaluation",
135
+ size: COLUMN_SIZES.BENCHMARK,
136
+ defaultVisible: true,
137
+ label: "CHB-MIT",
138
+ },
139
+ // Emotion Recognition
140
+ "evaluations.faced.normalized_score": {
141
+ group: "evaluation",
142
+ size: COLUMN_SIZES.BENCHMARK,
143
+ defaultVisible: false,
144
+ label: "FACED",
145
+ },
146
+ "evaluations.seedv.normalized_score": {
147
+ group: "evaluation",
148
+ size: COLUMN_SIZES.BENCHMARK,
149
+ defaultVisible: false,
150
+ label: "SEED-V",
151
  },
152
  },
153
  MODEL_INFO: {
 
160
  "model.architecture": {
161
  group: "model_info",
162
  size: COLUMN_SIZES.ARCHITECTURE,
163
+ defaultVisible: true,
164
+ label: "Foundation Model",
165
+ },
166
+ "metadata.adapter_method": {
167
+ group: "model_info",
168
+ size: COLUMN_SIZES.ADAPTER_METHOD,
169
  defaultVisible: false,
170
+ label: "Adapter Method",
171
  },
172
  "model.precision": {
173
  group: "model_info",
 
181
  defaultVisible: false,
182
  label: "Parameters (M)",
183
  },
184
+ "metadata.embed_dim": {
185
+ group: "model_info",
186
+ size: COLUMN_SIZES.EMBED_DIM,
187
+ defaultVisible: false,
188
+ label: "Embed Dim",
189
+ },
190
  "metadata.hub_license": {
191
  group: "model_info",
192
  size: COLUMN_SIZES.LICENSE,
 
207
  defaultVisible: false,
208
  label: "Submission Date",
209
  },
 
 
 
 
 
 
210
  "metadata.base_model": {
211
  group: "additional_info",
212
  size: COLUMN_SIZES.BASE_MODEL,
 
330
  export const SKELETON_COLUMNS = [
331
  40, // Checkbox
332
  COLUMN_SIZES.RANK, // Rank
333
+ COLUMN_SIZES.TYPE_ICON, // Adapter icon
334
  COLUMN_SIZES.MODEL, // Model name
335
  COLUMN_SIZES.AVERAGE_SCORE, // Average score
336
+ COLUMN_SIZES.BENCHMARK, // BCIC-2a
337
+ COLUMN_SIZES.BENCHMARK, // PhysioNet
338
+ COLUMN_SIZES.BENCHMARK, // ISRUC
339
+ COLUMN_SIZES.BENCHMARK, // TUAB
340
+ COLUMN_SIZES.BENCHMARK, // TUEV
341
+ COLUMN_SIZES.BENCHMARK, // CHB-MIT
342
  ];
frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/modelTypes.js CHANGED
@@ -1,34 +1,59 @@
 
 
 
 
1
  export const MODEL_TYPE_ORDER = [
2
- 'pretrained',
3
- 'fine-tuned',
4
- 'task-specific',
5
- 'foundation'
 
 
 
6
  ];
7
 
8
  export const MODEL_TYPES = {
9
- 'pretrained': {
10
- icon: '\u{1F7E2}',
11
- label: 'Pretrained',
12
- description: 'Base EEG models trained with self-supervised learning',
13
  order: 0
14
  },
15
- 'fine-tuned': {
16
- icon: '\u{1F536}',
17
- label: 'Fine-tuned',
18
- description: 'Models fine-tuned on specific EEG datasets',
19
  order: 1
20
  },
21
- 'task-specific': {
22
- icon: '\u{1F9EA}',
23
- label: 'Task-specific',
24
- description: 'Models designed for specific EEG tasks (e.g., sleep staging, motor imagery)',
25
  order: 2
26
  },
27
- 'foundation': {
28
- icon: '\u{1F3D7}\u{FE0F}',
29
- label: 'Foundation',
30
- description: 'Large-scale EEG foundation models',
31
  order: 3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  }
33
  };
34
 
@@ -53,7 +78,7 @@ export const getModelTypeDescription = (type) => {
53
  const matchedType = Object.entries(MODEL_TYPES).find(([key]) =>
54
  cleanType.includes(key)
55
  );
56
- return matchedType ? matchedType[1].description : 'Unknown model type';
57
  };
58
 
59
  export const getModelTypeOrder = (type) => {
 
1
+ // Model "types" in the EEG Finetune Arena represent adapter / fine-tuning
2
+ // methods used with EEG foundation models (LaBraM, EEGPT, BIOT, BENDR,
3
+ // SignalJEPA, CBraMod, REVE).
4
+
5
  export const MODEL_TYPE_ORDER = [
6
+ 'lora',
7
+ 'ia3',
8
+ 'adalora',
9
+ 'dora',
10
+ 'oft',
11
+ 'probe',
12
+ 'full_finetune'
13
  ];
14
 
15
  export const MODEL_TYPES = {
16
+ 'lora': {
17
+ icon: '\u{1F9E9}',
18
+ label: 'LoRA',
19
+ description: 'Low-Rank Adaptation: injects trainable low-rank matrices into transformer layers (~98% parameter reduction)',
20
  order: 0
21
  },
22
+ 'ia3': {
23
+ icon: '\u{1F4A1}',
24
+ label: 'IA3',
25
+ description: 'Infused Adapter by Inhibiting and Amplifying Inner Activations (~99.5% parameter reduction)',
26
  order: 1
27
  },
28
+ 'adalora': {
29
+ icon: '\u{1F3AF}',
30
+ label: 'AdaLoRA',
31
+ description: 'Adaptive Low-Rank Adaptation with dynamic rank allocation across layers',
32
  order: 2
33
  },
34
+ 'dora': {
35
+ icon: '\u{1F52C}',
36
+ label: 'DoRA',
37
+ description: 'Weight-Decomposed Low-Rank Adaptation: decomposes weights into magnitude and direction',
38
  order: 3
39
+ },
40
+ 'oft': {
41
+ icon: '\u{1F504}',
42
+ label: 'OFT',
43
+ description: 'Orthogonal Fine-Tuning: applies orthogonal transformations to preserve pre-trained features',
44
+ order: 4
45
+ },
46
+ 'probe': {
47
+ icon: '\u{1F50D}',
48
+ label: 'Probe',
49
+ description: 'Linear probing: freezes the encoder and trains only the classification head',
50
+ order: 5
51
+ },
52
+ 'full_finetune': {
53
+ icon: '\u{1F527}',
54
+ label: 'Full Fine-tune',
55
+ description: 'Full fine-tuning baseline: all model parameters are updated during training',
56
+ order: 6
57
  }
58
  };
59
 
 
78
  const matchedType = Object.entries(MODEL_TYPES).find(([key]) =>
79
  cleanType.includes(key)
80
  );
81
+ return matchedType ? matchedType[1].description : 'Unknown adapter method';
82
  };
83
 
84
  export const getModelTypeOrder = (type) => {
frontend/src/pages/LeaderboardPage/components/Leaderboard/constants/tooltips.js CHANGED
@@ -29,72 +29,157 @@ const createTooltipContent = (title, items) => (
29
  );
30
 
31
  export const COLUMN_TOOLTIPS = {
32
- AVERAGE: createTooltipContent("Average score across all benchmarks:", [
33
  {
34
  label: "Calculation",
35
- description: "Weighted average of normalized scores from all benchmarks",
36
  subItems: [
37
- "Each benchmark is normalized to a 0-100 scale",
38
- "All normalised benchmarks are then averaged together",
39
  ],
40
  },
41
  ]),
42
 
43
- ANLI: createTooltipContent("Adversarial Natural Language Inference (ANLI):", [
44
  {
45
- label: "Purpose",
46
- description:
47
- "Tests the model's ability to perform natural language inference on adversarially constructed examples",
48
  subItems: [
49
- "Entailment, contradiction, and neutral classification",
50
- "Adversarially-mined examples for robustness",
 
51
  ],
52
  },
53
  {
54
  label: "Scoring: Accuracy",
55
- description: "Was the correct label predicted for each example.",
56
  },
57
  ]),
58
 
59
- LOGIQA: createTooltipContent("Logical Reasoning QA (LogiQA):", [
60
  {
61
- label: "Purpose",
62
- description:
63
- "Evaluates logical reasoning abilities through multiple-choice questions",
64
  subItems: [
65
- "Categorical reasoning",
66
- "Sufficient conditional reasoning",
67
- "Necessary conditional reasoning",
68
- "Disjunctive reasoning",
69
- "Conjunctive reasoning",
70
  ],
71
  },
72
  {
73
  label: "Scoring: Accuracy",
74
- description:
75
- "Was the correct choice selected among the options.",
76
  },
77
  ]),
78
 
79
- ARCHITECTURE: createTooltipContent("Model Architecture Information:", [
80
  {
81
- label: "Definition",
82
- description: "The fundamental structure and design of the EEG model",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  subItems: [
84
- "Pretrained: Base EEG models trained with self-supervised learning on raw EEG data.",
85
- "Fine-tuned: Models fine-tuned on specific EEG datasets for particular downstream tasks.",
86
- "Task-specific: Models designed for specific EEG tasks such as sleep staging, motor imagery, or seizure detection.",
87
- "Foundation: Large-scale EEG foundation models trained on diverse EEG datasets.",
88
  ],
89
  },
90
  {
91
- label: "Impact",
92
- description: "How architecture affects model capabilities",
 
 
 
 
 
 
 
93
  subItems: [
94
- "Pretrained models provide general EEG representations but may need fine-tuning for specific tasks.",
95
- "Fine-tuned models are optimized for particular datasets or paradigms.",
96
- "Task-specific models achieve strong performance on their target task but may not generalize.",
97
- "Foundation models aim for broad generalization across EEG tasks.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  ],
99
  },
100
  ]),
@@ -108,16 +193,6 @@ export const COLUMN_TOOLTIPS = {
108
  "bfloat16: Half precision (Brain Float format), good for stability",
109
  "float16: Half precision",
110
  "8bit/4bit: Quantized formats, for efficiency",
111
- "GPTQ/AWQ: Quantized methods",
112
- ],
113
- },
114
- {
115
- label: "Impact",
116
- description: "How precision affects model deployment",
117
- subItems: [
118
- "Higher precision = better accuracy but more memory usage",
119
- "Lower precision = faster inference and smaller size",
120
- "Trade-off between model quality and resource usage",
121
  ],
122
  },
123
  ]),
@@ -136,9 +211,10 @@ export const COLUMN_TOOLTIPS = {
136
  label: "Measurement",
137
  description: "Total number of trainable parameters in millions",
138
  subItems: [
139
- "Indicates model capacity and complexity",
140
- "Correlates with computational requirements",
141
- "Influences memory usage and inference speed",
 
142
  ],
143
  },
144
  ]),
@@ -151,7 +227,6 @@ export const COLUMN_TOOLTIPS = {
151
  "Commercial vs non-commercial use",
152
  "Attribution requirements",
153
  "Modification and redistribution rights",
154
- "Liability and warranty terms",
155
  ],
156
  },
157
  ]),
@@ -176,7 +251,7 @@ export const UI_TOOLTIPS = {
176
  label: "Name Search",
177
  description: "Search directly by model name",
178
  subItems: [
179
- "Supports regular expressions (e.g., ^eegnet.*v4)",
180
  "Case sensitive",
181
  ],
182
  },
@@ -184,23 +259,23 @@ export const UI_TOOLTIPS = {
184
  label: "Field Search",
185
  description: "Use @field:value syntax for precise filtering",
186
  subItems: [
187
- "@architecture:eegnet - Filter by architecture",
188
  "@license:mit - Filter by license",
189
  "@precision:float16 - Filter by precision",
190
- "@type:pretrained - Filter by model type",
191
  ],
192
  },
193
  {
194
  label: "Multiple Searches",
195
  description: "Combine multiple criteria using semicolons",
196
  subItems: [
197
- "braindecode @license:mit; @architecture:eegnet",
198
- "^shallow.*net; @precision:float16",
199
  ],
200
  },
201
  ]),
202
  QUICK_FILTERS: createTooltipContent(
203
- "Filter models based on their size:",
204
  [
205
  {
206
  label: "Tiny (Up to 10M)",
@@ -210,17 +285,17 @@ export const UI_TOOLTIPS = {
210
  {
211
  label: "Small (10M-50M)",
212
  description:
213
- "Compact models balancing performance and efficiency.",
214
  },
215
  {
216
  label: "Medium (50M-200M)",
217
  description:
218
- "Mid-range models with good capacity for complex EEG tasks.",
219
  },
220
  {
221
  label: "Large (200M+)",
222
  description:
223
- "Large-scale models offering the best performance but requiring more resources.",
224
  },
225
  ]
226
  ),
@@ -232,7 +307,7 @@ export const UI_TOOLTIPS = {
232
  SCORE_DISPLAY: {
233
  title: "Score Display",
234
  description:
235
- "Choose between normalized scores (0-100% scale for easy comparison) or raw scores (actual benchmark results). Normalized scores help compare performance across different benchmarks, while raw scores show actual benchmark outputs.",
236
  },
237
  RANKING_MODE: {
238
  title: "Ranking Mode",
 
29
  );
30
 
31
  export const COLUMN_TOOLTIPS = {
32
+ AVERAGE: createTooltipContent("Average score across all EEG benchmarks:", [
33
  {
34
  label: "Calculation",
35
+ description: "Mean accuracy across all downstream EEG datasets",
36
  subItems: [
37
+ "Each benchmark score is accuracy normalized to 0-100",
38
+ "All benchmark scores are averaged together",
39
  ],
40
  },
41
  ]),
42
 
43
+ BCIC2A: createTooltipContent("BCI Competition IV Dataset 2a (BCIC-2a):", [
44
  {
45
+ label: "Task",
46
+ description: "4-class motor imagery classification",
 
47
  subItems: [
48
+ "Classes: left hand, right hand, feet, tongue",
49
+ "9 subjects, 22 EEG channels, 200 Hz",
50
+ "4-second trials with event-related windowing",
51
  ],
52
  },
53
  {
54
  label: "Scoring: Accuracy",
55
+ description: "Proportion of correctly classified motor imagery trials.",
56
  },
57
  ]),
58
 
59
+ PHYSIONET: createTooltipContent("PhysioNet Motor Imagery (PhysioNet MI):", [
60
  {
61
+ label: "Task",
62
+ description: "4-class motor imagery classification",
 
63
  subItems: [
64
+ "Classes: left hand, right hand, feet, both hands",
65
+ "109 subjects, 64 EEG channels, 200 Hz",
66
+ "3-second trials with event-related windowing",
 
 
67
  ],
68
  },
69
  {
70
  label: "Scoring: Accuracy",
71
+ description: "Proportion of correctly classified imagery trials.",
 
72
  },
73
  ]),
74
 
75
+ ISRUC_SLEEP: createTooltipContent("ISRUC-SLEEP Group I:", [
76
  {
77
+ label: "Task",
78
+ description: "5-class sleep staging",
79
+ subItems: [
80
+ "Classes: Wake, N1, N2, N3, REM",
81
+ "~100 subjects, 6 EEG channels, 200 Hz",
82
+ "30-second epoch windows",
83
+ ],
84
+ },
85
+ {
86
+ label: "Scoring: Accuracy",
87
+ description: "Proportion of correctly classified 30s sleep epochs.",
88
+ },
89
+ ]),
90
+
91
+ TUAB: createTooltipContent("TUH EEG Abnormal Corpus (TUAB v3.0.1):", [
92
+ {
93
+ label: "Task",
94
+ description: "Binary pathology detection (normal vs abnormal)",
95
+ subItems: [
96
+ "Classes: normal, abnormal",
97
+ "290+ subjects, 16 EEG channels, 200 Hz",
98
+ "10-second fixed-length windows",
99
+ ],
100
+ },
101
+ {
102
+ label: "Scoring: Accuracy",
103
+ description: "Proportion of correctly classified recordings.",
104
+ },
105
+ ]),
106
+
107
+ TUEV: createTooltipContent("TUH EEG Events Corpus (TUEV v2.0.1):", [
108
+ {
109
+ label: "Task",
110
+ description: "6-class EEG event classification",
111
  subItems: [
112
+ "Classes: SPSW, GPED, PLED, EYEM, ARTF, BCKG",
113
+ "200+ subjects, 21 EEG channels, 200 Hz",
114
+ "5-second event windows",
 
115
  ],
116
  },
117
  {
118
+ label: "Scoring: Accuracy",
119
+ description: "Proportion of correctly classified EEG events.",
120
+ },
121
+ ]),
122
+
123
+ CHBMIT: createTooltipContent("CHB-MIT Scalp EEG Database:", [
124
+ {
125
+ label: "Task",
126
+ description: "Binary seizure detection",
127
  subItems: [
128
+ "Classes: seizure, background (non-seizure)",
129
+ "23 pediatric subjects, 17 EEG channels, 200 Hz",
130
+ "10-second windows from bipolar montage",
131
+ ],
132
+ },
133
+ {
134
+ label: "Scoring: Accuracy",
135
+ description: "Proportion of correctly detected seizure and background windows.",
136
+ },
137
+ ]),
138
+
139
+ FACED: createTooltipContent("FACED Emotion Recognition Dataset:", [
140
+ {
141
+ label: "Task",
142
+ description: "9-class discrete emotion recognition from EEG",
143
+ subItems: [
144
+ "Classes: Anger, Disgust, Fear, Sadness, Neutral, Amusement, Inspiration, Joy, Tenderness",
145
+ "123 subjects, 26 EEG channels, 200 Hz",
146
+ "10-second event-related windows",
147
+ ],
148
+ },
149
+ {
150
+ label: "Scoring: Accuracy",
151
+ description: "Proportion of correctly classified emotion labels.",
152
+ },
153
+ ]),
154
+
155
+ SEEDV: createTooltipContent("SEED-V Emotion Recognition Dataset:", [
156
+ {
157
+ label: "Task",
158
+ description: "5-class emotion recognition from EEG",
159
+ subItems: [
160
+ "Classes: Happy, Sad, Neutral, Disgust, Fear",
161
+ "62 EEG channels, 200 Hz",
162
+ "1-second windows, session-based splits",
163
+ ],
164
+ },
165
+ {
166
+ label: "Scoring: Accuracy",
167
+ description: "Proportion of correctly classified emotion labels.",
168
+ },
169
+ ]),
170
+
171
+ ARCHITECTURE: createTooltipContent("EEG Foundation Model:", [
172
+ {
173
+ label: "Definition",
174
+ description: "The pre-trained foundation model that is being adapted",
175
+ subItems: [
176
+ "LaBraM: Vision Transformer for EEG (12 layers, 200D embedding)",
177
+ "EEGPT: Transformer with patch-based tokenization (8 layers, 512D)",
178
+ "BIOT: Linear Attention Transformer (4 layers, 256D)",
179
+ "BENDR: CNN + BERT-inspired Transformer (8 layers, 512D encoder)",
180
+ "SignalJEPA: CNN + Transformer with JEPA-style predictive learning",
181
+ "CBraMod: Criss-Cross Transformer with separate spatial/temporal attention (12 layers, 200D)",
182
+ "REVE: Vision Transformer with GEGLU (22 layers, 512D)",
183
  ],
184
  },
185
  ]),
 
193
  "bfloat16: Half precision (Brain Float format), good for stability",
194
  "float16: Half precision",
195
  "8bit/4bit: Quantized formats, for efficiency",
 
 
 
 
 
 
 
 
 
 
196
  ],
197
  },
198
  ]),
 
211
  label: "Measurement",
212
  description: "Total number of trainable parameters in millions",
213
  subItems: [
214
+ "Foundation model total parameters (before adapter)",
215
+ "Adapter methods reduce trainable params by 90-99.5%",
216
+ "LoRA r=16: ~98% reduction",
217
+ "IA3: ~99.5% reduction (only scaling vectors)",
218
  ],
219
  },
220
  ]),
 
227
  "Commercial vs non-commercial use",
228
  "Attribution requirements",
229
  "Modification and redistribution rights",
 
230
  ],
231
  },
232
  ]),
 
251
  label: "Name Search",
252
  description: "Search directly by model name",
253
  subItems: [
254
+ "Supports regular expressions (e.g., ^labram.*lora)",
255
  "Case sensitive",
256
  ],
257
  },
 
259
  label: "Field Search",
260
  description: "Use @field:value syntax for precise filtering",
261
  subItems: [
262
+ "@architecture:labram - Filter by foundation model",
263
  "@license:mit - Filter by license",
264
  "@precision:float16 - Filter by precision",
265
+ "@type:lora - Filter by adapter method",
266
  ],
267
  },
268
  {
269
  label: "Multiple Searches",
270
  description: "Combine multiple criteria using semicolons",
271
  subItems: [
272
+ "labram @type:lora; @architecture:eegpt",
273
+ "^biot.*ia3; @precision:float16",
274
  ],
275
  },
276
  ]),
277
  QUICK_FILTERS: createTooltipContent(
278
+ "Filter models based on their total parameter count:",
279
  [
280
  {
281
  label: "Tiny (Up to 10M)",
 
285
  {
286
  label: "Small (10M-50M)",
287
  description:
288
+ "Compact models balancing performance and efficiency (e.g., CBraMod ~4M, EEGPT ~10M).",
289
  },
290
  {
291
  label: "Medium (50M-200M)",
292
  description:
293
+ "Mid-range foundation models with good capacity.",
294
  },
295
  {
296
  label: "Large (200M+)",
297
  description:
298
+ "Large foundation models offering broad generalization (e.g., REVE, LaBraM).",
299
  },
300
  ]
301
  ),
 
307
  SCORE_DISPLAY: {
308
  title: "Score Display",
309
  description:
310
+ "Choose between normalized scores (0-100% scale for easy comparison) or raw scores (actual accuracy values 0-1). Normalized scores help compare performance across different benchmarks.",
311
  },
312
  RANKING_MODE: {
313
  title: "Ranking Mode",
frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useDataUtils.js CHANGED
@@ -57,10 +57,6 @@ export const useProcessedData = (data, averageMode, visibleColumns) => {
57
  // Boolean standardization
58
  const standardizedFeatures = {
59
  ...item.features,
60
- is_moe: Boolean(item.features.is_moe),
61
- is_flagged: Boolean(item.features.is_flagged),
62
- is_official_provider: Boolean(item.features.is_official_provider),
63
- is_merged: Boolean(item.features.is_merged),
64
  is_not_available_on_hub: Boolean(item.features.is_not_available_on_hub),
65
  };
66
 
@@ -69,7 +65,6 @@ export const useProcessedData = (data, averageMode, visibleColumns) => {
69
  features: standardizedFeatures,
70
  model: {
71
  ...item.model,
72
- has_chat_template: Boolean(item.model.has_chat_template),
73
  average_score: average,
74
  },
75
  };
@@ -101,8 +96,7 @@ export const useFilteredData = (
101
  searchValue,
102
  selectedBooleanFilters,
103
  rankingMode,
104
- pinnedModels = [],
105
- isOfficialProviderActive = false
106
  ) => {
107
  return useMemo(() => {
108
  const pinnedData = processedData.filter((row) => {
@@ -115,15 +109,6 @@ export const useFilteredData = (
115
 
116
  let filteredUnpinned = unpinnedData;
117
 
118
- // Filter by official providers
119
- if (isOfficialProviderActive) {
120
- filteredUnpinned = filteredUnpinned.filter(
121
- (row) =>
122
- row.features?.is_official_provider ||
123
- row.metadata?.is_official_provider
124
- );
125
- }
126
-
127
  // Filter by precision
128
  if (selectedPrecisions.length > 0) {
129
  filteredUnpinned = filteredUnpinned.filter((row) =>
@@ -131,7 +116,7 @@ export const useFilteredData = (
131
  );
132
  }
133
 
134
- // Filter by type
135
  if (
136
  selectedTypes.length > 0 &&
137
  selectedTypes.length < MODEL_TYPE_ORDER.length
@@ -142,11 +127,11 @@ export const useFilteredData = (
142
  });
143
  }
144
 
145
- // Filter by parameters
146
- if (!(paramsRange[0] === -1 && paramsRange[1] === 140)) {
147
  filteredUnpinned = filteredUnpinned.filter((row) => {
148
  const params =
149
- row.metadata?.params_billions || row.features?.params_billions;
150
  if (params === undefined || params === null) return false;
151
  return params >= paramsRange[0] && params < paramsRange[1];
152
  });
@@ -200,12 +185,6 @@ export const useFilteredData = (
200
  const filterValue =
201
  typeof filter === "object" ? filter.value : filter;
202
 
203
- // Maintainer's Highlight keeps positive logic
204
- if (filterValue === "is_official_provider") {
205
- return row.features[filterValue];
206
- }
207
-
208
- // For all other filters, invert the logic
209
  if (filterValue === "is_not_available_on_hub") {
210
  return row.features[filterValue];
211
  }
@@ -280,7 +259,6 @@ export const useFilteredData = (
280
  selectedBooleanFilters,
281
  rankingMode,
282
  pinnedModels,
283
- isOfficialProviderActive,
284
  ]);
285
  };
286
 
 
57
  // Boolean standardization
58
  const standardizedFeatures = {
59
  ...item.features,
 
 
 
 
60
  is_not_available_on_hub: Boolean(item.features.is_not_available_on_hub),
61
  };
62
 
 
65
  features: standardizedFeatures,
66
  model: {
67
  ...item.model,
 
68
  average_score: average,
69
  },
70
  };
 
96
  searchValue,
97
  selectedBooleanFilters,
98
  rankingMode,
99
+ pinnedModels = []
 
100
  ) => {
101
  return useMemo(() => {
102
  const pinnedData = processedData.filter((row) => {
 
109
 
110
  let filteredUnpinned = unpinnedData;
111
 
 
 
 
 
 
 
 
 
 
112
  // Filter by precision
113
  if (selectedPrecisions.length > 0) {
114
  filteredUnpinned = filteredUnpinned.filter((row) =>
 
116
  );
117
  }
118
 
119
+ // Filter by type (adapter method)
120
  if (
121
  selectedTypes.length > 0 &&
122
  selectedTypes.length < MODEL_TYPE_ORDER.length
 
127
  });
128
  }
129
 
130
+ // Filter by parameters (in millions for EEG)
131
+ if (!(paramsRange[0] === -1 && paramsRange[1] === 500)) {
132
  filteredUnpinned = filteredUnpinned.filter((row) => {
133
  const params =
134
+ row.metadata?.params_millions || row.features?.params_millions;
135
  if (params === undefined || params === null) return false;
136
  return params >= paramsRange[0] && params < paramsRange[1];
137
  });
 
185
  const filterValue =
186
  typeof filter === "object" ? filter.value : filter;
187
 
 
 
 
 
 
 
188
  if (filterValue === "is_not_available_on_hub") {
189
  return row.features[filterValue];
190
  }
 
259
  selectedBooleanFilters,
260
  rankingMode,
261
  pinnedModels,
 
262
  ]);
263
  };
264
 
frontend/src/pages/LeaderboardPage/components/Leaderboard/hooks/useLeaderboardData.js CHANGED
@@ -73,7 +73,6 @@ export const useLeaderboardProcessing = () => {
73
  types: state.filters.types,
74
  paramsRange: state.filters.paramsRange,
75
  booleanFilters: state.filters.booleanFilters,
76
- isOfficialProviderActive: state.filters.isOfficialProviderActive,
77
  }),
78
  [
79
  state.filters.search,
@@ -81,7 +80,6 @@ export const useLeaderboardProcessing = () => {
81
  state.filters.types,
82
  state.filters.paramsRange,
83
  state.filters.booleanFilters,
84
- state.filters.isOfficialProviderActive,
85
  ]
86
  );
87
 
@@ -108,8 +106,7 @@ export const useLeaderboardProcessing = () => {
108
  state.display.scoreDisplay,
109
  state.pinnedModels,
110
  actions.togglePinnedModel,
111
- setSorting,
112
- memoizedFilters.isOfficialProviderActive
113
  );
114
 
115
  return {
 
73
  types: state.filters.types,
74
  paramsRange: state.filters.paramsRange,
75
  booleanFilters: state.filters.booleanFilters,
 
76
  }),
77
  [
78
  state.filters.search,
 
80
  state.filters.types,
81
  state.filters.paramsRange,
82
  state.filters.booleanFilters,
 
83
  ]
84
  );
85
 
 
106
  state.display.scoreDisplay,
107
  state.pinnedModels,
108
  actions.togglePinnedModel,
109
+ setSorting
 
110
  );
111
 
112
  return {
frontend/src/pages/LeaderboardPage/components/Leaderboard/utils/columnUtils.js CHANGED
@@ -276,7 +276,7 @@ const RankIndicator = ({ rank, previousRank, mode }) => {
276
 
277
  const getDetailsUrl = (modelName) => {
278
  const formattedName = modelName.replace("/", "__");
279
- return `https://huggingface.co/datasets/open-llm-leaderboard/${formattedName}-details`;
280
  };
281
 
282
  const HeaderLabel = ({ label, tooltip, className, isSorted }) => (
@@ -366,7 +366,7 @@ const createHeaderCell = (label, tooltip) => (header) =>
366
  );
367
 
368
  const createModelHeader =
369
- (totalModels, officialProvidersCount = 0, isOfficialProviderActive = false) =>
370
  ({ table }) => {
371
  return (
372
  <Box
@@ -751,57 +751,75 @@ export const createColumns = (
751
 
752
  const evaluationColumns = [
753
  {
754
- accessorKey: "evaluations.ifeval.normalized_score",
755
- header: createHeaderCell("IFEval", COLUMN_TOOLTIPS.IFEVAL),
756
  cell: ({ row, getValue }) =>
757
- createScoreCell(getValue, row, "evaluations.ifeval.normalized_score"),
758
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
759
- "evaluations.ifeval.normalized_score"
760
  ],
761
  },
762
  {
763
- accessorKey: "evaluations.bbh.normalized_score",
764
- header: createHeaderCell("BBH", COLUMN_TOOLTIPS.BBH),
765
  cell: ({ row, getValue }) =>
766
- createScoreCell(getValue, row, "evaluations.bbh.normalized_score"),
767
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
768
- "evaluations.bbh.normalized_score"
769
  ],
770
  },
771
  {
772
- accessorKey: "evaluations.math.normalized_score",
773
- header: createHeaderCell("MATH", COLUMN_TOOLTIPS.MATH),
774
  cell: ({ row, getValue }) =>
775
- createScoreCell(getValue, row, "evaluations.math.normalized_score"),
776
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
777
- "evaluations.math.normalized_score"
778
  ],
779
  },
780
  {
781
- accessorKey: "evaluations.gpqa.normalized_score",
782
- header: createHeaderCell("GPQA", COLUMN_TOOLTIPS.GPQA),
783
  cell: ({ row, getValue }) =>
784
- createScoreCell(getValue, row, "evaluations.gpqa.normalized_score"),
785
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
786
- "evaluations.gpqa.normalized_score"
787
  ],
788
  },
789
  {
790
- accessorKey: "evaluations.musr.normalized_score",
791
- header: createHeaderCell("MUSR", COLUMN_TOOLTIPS.MUSR),
792
  cell: ({ row, getValue }) =>
793
- createScoreCell(getValue, row, "evaluations.musr.normalized_score"),
794
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
795
- "evaluations.musr.normalized_score"
796
  ],
797
  },
798
  {
799
- accessorKey: "evaluations.mmlu_pro.normalized_score",
800
- header: createHeaderCell("MMLU-PRO", COLUMN_TOOLTIPS.MMLU_PRO),
801
  cell: ({ row, getValue }) =>
802
- createScoreCell(getValue, row, "evaluations.mmlu_pro.normalized_score"),
803
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
804
- "evaluations.mmlu_pro.normalized_score"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
805
  ],
806
  },
807
  ];
@@ -830,8 +848,8 @@ export const createColumns = (
830
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.precision"],
831
  },
832
  {
833
- accessorKey: "metadata.params_billions",
834
- header: createHeaderCell("Parameters", COLUMN_TOOLTIPS.PARAMETERS),
835
  cell: ({ row }) => (
836
  <Box
837
  sx={{
@@ -841,12 +859,12 @@ export const createColumns = (
841
  }}
842
  >
843
  <Typography variant="body2">
844
- {row.original.metadata.params_billions}
845
- <span style={{ opacity: 0.6 }}>B</span>
846
  </Typography>
847
  </Box>
848
  ),
849
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.params_billions"],
850
  },
851
  {
852
  accessorKey: "metadata.hub_license",
@@ -924,19 +942,6 @@ export const createColumns = (
924
  ),
925
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.submission_date"],
926
  },
927
- {
928
- accessorKey: "metadata.generation",
929
- header: createHeaderCell(
930
- "Generation",
931
- "The generation or version number of the model"
932
- ),
933
- cell: ({ row }) => (
934
- <Typography variant="body2">
935
- {row.original.metadata.generation}
936
- </Typography>
937
- ),
938
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.generation"],
939
- },
940
  {
941
  accessorKey: "metadata.base_model",
942
  header: createHeaderCell(
@@ -960,34 +965,39 @@ export const createColumns = (
960
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.base_model"],
961
  },
962
  {
963
- accessorKey: "metadata.co2_cost",
964
- header: createHeaderCell("CO₂ Cost", COLUMN_TOOLTIPS.CO2_COST),
 
 
 
965
  cell: ({ row }) => (
966
- <Box
967
- sx={{
968
- display: "flex",
969
- alignItems: "center",
970
- justifyContent: "flex-start",
971
- }}
972
- >
973
- <Typography variant="body2">
974
- {row.original.metadata.co2_cost?.toFixed(2) || "0"}
975
- <span style={{ opacity: 0.6 }}> kg</span>
976
  </Typography>
977
- </Box>
978
  ),
979
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.co2_cost"],
980
  },
981
  {
982
- accessorKey: "model.has_chat_template",
983
  header: createHeaderCell(
984
- "Chat Template",
985
- "Whether this model has a chat template defined"
986
  ),
987
  cell: ({ row }) => (
988
- <BooleanValue value={row.original.model.has_chat_template} />
 
 
989
  ),
990
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.has_chat_template"],
991
  },
992
  {
993
  accessorKey: "features.is_not_available_on_hub",
@@ -1002,42 +1012,6 @@ export const createColumns = (
1002
  "features.is_not_available_on_hub"
1003
  ],
1004
  },
1005
- {
1006
- accessorKey: "features.is_official_provider",
1007
- header: createHeaderCell(
1008
- "Official Providers",
1009
- "Models that are officially provided and maintained by their original creators or organizations"
1010
- ),
1011
- cell: ({ row }) => (
1012
- <BooleanValue
1013
- value={row.original.features.is_official_provider}
1014
- />
1015
- ),
1016
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
1017
- "features.is_official_provider"
1018
- ],
1019
- enableSorting: true,
1020
- },
1021
- {
1022
- accessorKey: "features.is_moe",
1023
- header: createHeaderCell(
1024
- "Mixture of Experts",
1025
- "Whether this model uses a Mixture of Experts architecture"
1026
- ),
1027
- cell: ({ row }) => <BooleanValue value={row.original.features.is_moe} />,
1028
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["features.is_moe"],
1029
- },
1030
- {
1031
- accessorKey: "features.is_flagged",
1032
- header: createHeaderCell(
1033
- "Flag Status",
1034
- "Whether this model has been flagged for any issues"
1035
- ),
1036
- cell: ({ row }) => (
1037
- <BooleanValue value={row.original.features.is_flagged} />
1038
- ),
1039
- size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["features.is_flagged"],
1040
- },
1041
  ];
1042
 
1043
  // Utiliser directement columnVisibility
@@ -1051,19 +1025,15 @@ export const createColumns = (
1051
  const order = {
1052
  "model.architecture": 1,
1053
  "model.precision": 2,
1054
- "metadata.params_billions": 3,
1055
  "metadata.hub_license": 4,
1056
- "metadata.co2_cost": 5,
1057
- "metadata.hub_hearts": 6,
1058
- "metadata.upload_date": 7,
1059
- "metadata.submission_date": 8,
1060
- "metadata.generation": 9,
1061
  "metadata.base_model": 10,
1062
- "model.has_chat_template": 11,
1063
- "features.is_not_available_on_hub": 12,
1064
- "features.is_official_provider": 13,
1065
- "features.is_moe": 14,
1066
- "features.is_flagged": 15,
1067
  };
1068
  return order[a.accessorKey] - order[b.accessorKey];
1069
  }),
 
276
 
277
  const getDetailsUrl = (modelName) => {
278
  const formattedName = modelName.replace("/", "__");
279
+ return `https://huggingface.co/datasets/braindecode/${formattedName}-details`;
280
  };
281
 
282
  const HeaderLabel = ({ label, tooltip, className, isSorted }) => (
 
366
  );
367
 
368
  const createModelHeader =
369
+ (totalModels) =>
370
  ({ table }) => {
371
  return (
372
  <Box
 
751
 
752
  const evaluationColumns = [
753
  {
754
+ accessorKey: "evaluations.bcic2a.normalized_score",
755
+ header: createHeaderCell("BCIC-2a", COLUMN_TOOLTIPS.BCIC2A),
756
  cell: ({ row, getValue }) =>
757
+ createScoreCell(getValue, row, "evaluations.bcic2a.normalized_score"),
758
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
759
+ "evaluations.bcic2a.normalized_score"
760
  ],
761
  },
762
  {
763
+ accessorKey: "evaluations.physionet.normalized_score",
764
+ header: createHeaderCell("PhysioNet", COLUMN_TOOLTIPS.PHYSIONET),
765
  cell: ({ row, getValue }) =>
766
+ createScoreCell(getValue, row, "evaluations.physionet.normalized_score"),
767
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
768
+ "evaluations.physionet.normalized_score"
769
  ],
770
  },
771
  {
772
+ accessorKey: "evaluations.isruc_sleep.normalized_score",
773
+ header: createHeaderCell("ISRUC", COLUMN_TOOLTIPS.ISRUC_SLEEP),
774
  cell: ({ row, getValue }) =>
775
+ createScoreCell(getValue, row, "evaluations.isruc_sleep.normalized_score"),
776
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
777
+ "evaluations.isruc_sleep.normalized_score"
778
  ],
779
  },
780
  {
781
+ accessorKey: "evaluations.tuab.normalized_score",
782
+ header: createHeaderCell("TUAB", COLUMN_TOOLTIPS.TUAB),
783
  cell: ({ row, getValue }) =>
784
+ createScoreCell(getValue, row, "evaluations.tuab.normalized_score"),
785
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
786
+ "evaluations.tuab.normalized_score"
787
  ],
788
  },
789
  {
790
+ accessorKey: "evaluations.tuev.normalized_score",
791
+ header: createHeaderCell("TUEV", COLUMN_TOOLTIPS.TUEV),
792
  cell: ({ row, getValue }) =>
793
+ createScoreCell(getValue, row, "evaluations.tuev.normalized_score"),
794
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
795
+ "evaluations.tuev.normalized_score"
796
  ],
797
  },
798
  {
799
+ accessorKey: "evaluations.chbmit.normalized_score",
800
+ header: createHeaderCell("CHB-MIT", COLUMN_TOOLTIPS.CHBMIT),
801
  cell: ({ row, getValue }) =>
802
+ createScoreCell(getValue, row, "evaluations.chbmit.normalized_score"),
803
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
804
+ "evaluations.chbmit.normalized_score"
805
+ ],
806
+ },
807
+ {
808
+ accessorKey: "evaluations.faced.normalized_score",
809
+ header: createHeaderCell("FACED", COLUMN_TOOLTIPS.FACED),
810
+ cell: ({ row, getValue }) =>
811
+ createScoreCell(getValue, row, "evaluations.faced.normalized_score"),
812
+ size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
813
+ "evaluations.faced.normalized_score"
814
+ ],
815
+ },
816
+ {
817
+ accessorKey: "evaluations.seedv.normalized_score",
818
+ header: createHeaderCell("SEED-V", COLUMN_TOOLTIPS.SEEDV),
819
+ cell: ({ row, getValue }) =>
820
+ createScoreCell(getValue, row, "evaluations.seedv.normalized_score"),
821
+ size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES[
822
+ "evaluations.seedv.normalized_score"
823
  ],
824
  },
825
  ];
 
848
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["model.precision"],
849
  },
850
  {
851
+ accessorKey: "metadata.params_millions",
852
+ header: createHeaderCell("Parameters (M)", COLUMN_TOOLTIPS.PARAMETERS),
853
  cell: ({ row }) => (
854
  <Box
855
  sx={{
 
859
  }}
860
  >
861
  <Typography variant="body2">
862
+ {row.original.metadata.params_millions}
863
+ <span style={{ opacity: 0.6 }}>M</span>
864
  </Typography>
865
  </Box>
866
  ),
867
+ size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.params_millions"],
868
  },
869
  {
870
  accessorKey: "metadata.hub_license",
 
942
  ),
943
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.submission_date"],
944
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
945
  {
946
  accessorKey: "metadata.base_model",
947
  header: createHeaderCell(
 
965
  size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.base_model"],
966
  },
967
  {
968
+ accessorKey: "metadata.adapter_method",
969
+ header: createHeaderCell(
970
+ "Adapter Method",
971
+ "The PEFT adapter method used for fine-tuning"
972
+ ),
973
  cell: ({ row }) => (
974
+ <Tooltip title={row.original.metadata.adapter_method || "-"}>
975
+ <Typography
976
+ variant="body2"
977
+ sx={{
978
+ overflow: "hidden",
979
+ textOverflow: "ellipsis",
980
+ whiteSpace: "nowrap",
981
+ }}
982
+ >
983
+ {row.original.metadata.adapter_method || "-"}
984
  </Typography>
985
+ </Tooltip>
986
  ),
987
+ size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.adapter_method"],
988
  },
989
  {
990
+ accessorKey: "metadata.embed_dim",
991
  header: createHeaderCell(
992
+ "Embed Dim",
993
+ "Embedding dimension of the foundation model"
994
  ),
995
  cell: ({ row }) => (
996
+ <Typography variant="body2">
997
+ {row.original.metadata.embed_dim || "-"}
998
+ </Typography>
999
  ),
1000
+ size: TABLE_DEFAULTS.COLUMNS.COLUMN_SIZES["metadata.embed_dim"],
1001
  },
1002
  {
1003
  accessorKey: "features.is_not_available_on_hub",
 
1012
  "features.is_not_available_on_hub"
1013
  ],
1014
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1015
  ];
1016
 
1017
  // Utiliser directement columnVisibility
 
1025
  const order = {
1026
  "model.architecture": 1,
1027
  "model.precision": 2,
1028
+ "metadata.params_millions": 3,
1029
  "metadata.hub_license": 4,
1030
+ "metadata.hub_hearts": 5,
1031
+ "metadata.adapter_method": 6,
1032
+ "metadata.embed_dim": 7,
1033
+ "metadata.upload_date": 8,
1034
+ "metadata.submission_date": 9,
1035
  "metadata.base_model": 10,
1036
+ "features.is_not_available_on_hub": 11,
 
 
 
 
1037
  };
1038
  return order[a.accessorKey] - order[b.accessorKey];
1039
  }),
frontend/src/pages/QuotePage/QuotePage.js CHANGED
@@ -33,28 +33,120 @@ const citations = [
33
 
34
  const benchmarks = [
35
  {
36
- title: "ANLI: Adversarial Natural Language Inference",
37
- authors: "Nie et al.",
38
- citation: `@inproceedings{nie2020adversarial,
39
- title={Adversarial NLI: A New Benchmark for Natural Language Understanding},
40
- author={Nie, Yixin and Williams, Adina and Dinan, Emily and Bansal, Mohit and Weston, Jason and Kiela, Douwe},
41
- booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
42
- pages={4885--4901},
43
- year={2020}
 
44
  }`,
45
- url: "https://arxiv.org/abs/1910.14599",
46
  },
47
  {
48
- title: "LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  authors: "Liu et al.",
50
- citation: `@inproceedings{liu2020logiqa,
51
- title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
52
- author={Liu, Jian and Cui, Leyang and Liu, Hanmeng and Huang, Dandan and Wang, Yile and Zhang, Yue},
53
- booktitle={Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence},
54
- pages={3622--3628},
55
- year={2020}
 
 
 
56
  }`,
57
- url: "https://arxiv.org/abs/2007.08124",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  },
59
  ];
60
 
@@ -147,8 +239,9 @@ function QuotePage() {
147
 
148
  <Alert severity="info" sx={{ mb: 4 }}>
149
  <Typography variant="body2">
150
- The citations below include both the EEG Finetune Arena and the
151
- individual benchmarks used in our evaluation suite.
 
152
  </Typography>
153
  </Alert>
154
 
@@ -163,9 +256,9 @@ function QuotePage() {
163
  </Box>
164
  </Box>
165
 
166
- <Box>
167
  <Typography variant="h5" sx={{ mb: 3 }}>
168
- Benchmarks
169
  </Typography>
170
  <Box sx={{ display: "flex", flexDirection: "column", gap: 3 }}>
171
  {benchmarks.map((benchmark, index) => (
@@ -173,6 +266,17 @@ function QuotePage() {
173
  ))}
174
  </Box>
175
  </Box>
 
 
 
 
 
 
 
 
 
 
 
176
  </Box>
177
  );
178
  }
 
33
 
34
  const benchmarks = [
35
  {
36
+ title: "BCIC-2a: BCI Competition IV Dataset 2a",
37
+ authors: "Brunner et al.",
38
+ citation: `@article{brunner2008bci,
39
+ title={BCI Competition 2008--Graz data set A},
40
+ author={Brunner, Clemens and Leeb, Robert and M{\\\"u}ller-Putz, Gernot and Schl{\\\"o}gl, Alois and Pfurtscheller, Gert},
41
+ journal={Institute for Knowledge Discovery (Laboratory of Brain-Computer Interfaces), Graz University of Technology},
42
+ volume={16},
43
+ pages={1--6},
44
+ year={2008}
45
  }`,
 
46
  },
47
  {
48
+ title: "PhysioNet Motor Imagery",
49
+ authors: "Schalk et al.",
50
+ citation: `@article{schalk2004bci2000,
51
+ title={BCI2000: a general-purpose brain-computer interface (BCI) system},
52
+ author={Schalk, Gerwin and McFarland, Dennis J and Hinterberger, Thilo and Birbaumer, Niels and Wolpaw, Jonathan R},
53
+ journal={IEEE Transactions on biomedical engineering},
54
+ volume={51},
55
+ number={6},
56
+ pages={1034--1043},
57
+ year={2004},
58
+ publisher={IEEE}
59
+ }`,
60
+ },
61
+ {
62
+ title: "ISRUC-SLEEP: Sleep Staging Dataset",
63
+ authors: "Khalighi et al.",
64
+ citation: `@article{khalighi2016isruc,
65
+ title={ISRUC-Sleep: A comprehensive public dataset for sleep researchers},
66
+ author={Khalighi, Sirvan and Sousa, Teresa and Santos, Jo{\\~a}o M and Nunes, Urbano},
67
+ journal={Computer methods and programs in biomedicine},
68
+ volume={124},
69
+ pages={180--192},
70
+ year={2016},
71
+ publisher={Elsevier}
72
+ }`,
73
+ },
74
+ {
75
+ title: "TUAB: TUH EEG Abnormal Corpus",
76
+ authors: "Lopez de Diego et al.",
77
+ citation: `@inproceedings{lopez2017automated,
78
+ title={Automated interpretation of abnormal adult electroencephalograms},
79
+ author={Lopez de Diego, Silvia and Obeid, Iyad and Bhatt, Priyanka and Bhatt, Sagar and Jacobson, Douglas and Picone, Joseph},
80
+ year={2017}
81
+ }`,
82
+ },
83
+ {
84
+ title: "CHB-MIT: Scalp EEG Database",
85
+ authors: "Shoeb",
86
+ citation: `@phdthesis{shoeb2009application,
87
+ title={Application of machine learning to epileptic seizure onset detection and treatment},
88
+ author={Shoeb, Ali Hossam},
89
+ year={2009},
90
+ school={Massachusetts Institute of Technology}
91
+ }`,
92
+ },
93
+ {
94
+ title: "FACED: Emotion Recognition Dataset",
95
+ authors: "Chen et al.",
96
+ citation: `@article{chen2023large,
97
+ title={A large finer-grained affective computing EEG dataset},
98
+ author={Chen, Jingjing and Wang, Xiaobin and Huang, Chen and Hu, Xin and Shen, Xinke and Li, Dan},
99
+ journal={Scientific Data},
100
+ volume={10},
101
+ number={1},
102
+ pages={740},
103
+ year={2023},
104
+ publisher={Nature Publishing Group UK London}
105
+ }`,
106
+ url: "https://www.nature.com/articles/s41597-023-02650-w",
107
+ },
108
+ {
109
+ title: "SEED-V: Emotion Recognition with Video Stimuli",
110
  authors: "Liu et al.",
111
+ citation: `@article{liu2021comparing,
112
+ title={Comparing recognition performance and robustness of multimodal deep learning models for multimodal emotion recognition},
113
+ author={Liu, Wei and Qiu, Jie-Lin and Zheng, Wei-Long and Lu, Bao-Liang},
114
+ journal={IEEE Transactions on Cognitive and Developmental Systems},
115
+ volume={14},
116
+ number={2},
117
+ pages={715--729},
118
+ year={2021},
119
+ publisher={IEEE}
120
  }`,
121
+ },
122
+ ];
123
+
124
+ const frameworks = [
125
+ {
126
+ title: "MOABB: Mother of All BCI Benchmarks",
127
+ authors: "Jayaram, Barachant",
128
+ citation: `@article{jayaram2018moabb,
129
+ title={MOABB: trustworthy algorithm benchmarking for BCIs},
130
+ author={Jayaram, Vinay and Barachant, Alexandre},
131
+ journal={Journal of neural engineering},
132
+ volume={15},
133
+ number={6},
134
+ pages={066011},
135
+ year={2018},
136
+ publisher={IOP Publishing}
137
+ }`,
138
+ url: "https://moabb.neurotechx.com",
139
+ },
140
+ {
141
+ title: "PEFT: Parameter-Efficient Fine-Tuning",
142
+ authors: "Mangrulkar et al.",
143
+ citation: `@misc{peft2024,
144
+ title={PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
145
+ author={Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
146
+ year={2024},
147
+ howpublished={\\url{https://github.com/huggingface/peft}}
148
+ }`,
149
+ url: "https://huggingface.co/docs/peft",
150
  },
151
  ];
152
 
 
239
 
240
  <Alert severity="info" sx={{ mb: 4 }}>
241
  <Typography variant="body2">
242
+ The citations below include the core library, the individual EEG
243
+ benchmark datasets, and the evaluation frameworks used in our
244
+ benchmark suite.
245
  </Typography>
246
  </Alert>
247
 
 
256
  </Box>
257
  </Box>
258
 
259
+ <Box sx={{ mb: 6 }}>
260
  <Typography variant="h5" sx={{ mb: 3 }}>
261
+ EEG Benchmark Datasets
262
  </Typography>
263
  <Box sx={{ display: "flex", flexDirection: "column", gap: 3 }}>
264
  {benchmarks.map((benchmark, index) => (
 
266
  ))}
267
  </Box>
268
  </Box>
269
+
270
+ <Box>
271
+ <Typography variant="h5" sx={{ mb: 3 }}>
272
+ Evaluation Frameworks
273
+ </Typography>
274
+ <Box sx={{ display: "flex", flexDirection: "column", gap: 3 }}>
275
+ {frameworks.map((framework, index) => (
276
+ <CitationBlock key={index} {...framework} />
277
+ ))}
278
+ </Box>
279
+ </Box>
280
  </Box>
281
  );
282
  }