import React, { useState } from "react";
import { Box, Typography, Paper, Link, IconButton, Tooltip } from "@mui/material";
import ContentCopyIcon from "@mui/icons-material/ContentCopy";
import CheckIcon from "@mui/icons-material/Check";
import PageHeader from "../../components/shared/PageHeader";
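/**
 * AboutPage: static "About" view for the EEG Finetune Arena leaderboard.
 * Renders an overview of the benchmark, the foundation models and adapter
 * methods being compared, the downstream EEG datasets, related resources,
 * and citation information. Assumed usage (not shown in this file): mounted
 * by the app's router, e.g. under an /about route, with PageHeader provided
 * as a shared layout component.
 */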
function AboutPage() {
  return (
    <Box sx={{ width: "100%", maxWidth: 1200, margin: "0 auto", py: 4, px: 0 }}>
      <PageHeader
        title="About the EEG Finetune Arena"
        subtitle="Parameter-Efficient Fine-Tuning Benchmark for EEG Foundation Models"
      />
      <Paper
        elevation={0}
        sx={{
          p: 4,
          mb: 4,
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 2,
        }}
      >
        <Typography variant="h5" sx={{ mb: 2 }}>
          What is the EEG Finetune Arena?
        </Typography>
        <Typography variant="body1" color="text.secondary" paragraph>
          The EEG Finetune Arena is an open leaderboard for comparing
          parameter-efficient fine-tuning (PEFT) methods applied to EEG
          foundation models. We provide a standardized evaluation pipeline
          across diverse EEG downstream tasks, enabling fair and reproducible
          comparisons of how well different adapter methods can adapt
          pre-trained EEG models.
        </Typography>
        <Typography variant="body1" color="text.secondary" paragraph>
          Built on top of{" "}
          <Link
            href="https://braindecode.org"
            target="_blank"
            rel="noopener noreferrer"
          >
            braindecode
          </Link>
          ,{" "}
          <Link
            href="https://moabb.neurotechx.com"
            target="_blank"
            rel="noopener noreferrer"
          >
            MOABB
          </Link>
          , and the{" "}
          <Link
            href="https://huggingface.co/docs/peft"
            target="_blank"
            rel="noopener noreferrer"
          >
            HuggingFace PEFT
          </Link>{" "}
          library, the arena evaluates combinations of 7 foundation models with
          7 adapter methods across 14 EEG datasets.
        </Typography>
      </Paper>
      <Paper
        elevation={0}
        sx={{
          p: 4,
          mb: 4,
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 2,
        }}
      >
        <Typography variant="h5" sx={{ mb: 2 }}>
          Foundation Models
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>LaBraM</strong> - Vision Transformer for EEG with neural
              tokenization (12 layers, 200D embedding). Pre-trained on
              large-scale EEG data.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>EEGPT</strong> - Transformer with patch-based EEG
              tokenization (~10M params, 8 layers, 512D embedding).
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>BIOT</strong> - Linear Attention Transformer for
              efficient EEG processing (4 layers, 256D embedding).
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>BENDR</strong> - CNN + BERT-inspired Transformer encoder
              (8 layers, 512D).
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>SignalJEPA</strong> - CNN + Transformer with JEPA-style
              predictive self-supervised learning.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>CBraMod</strong> - Criss-Cross Transformer with separate
              spatial and temporal attention (~4M params, 12 layers, 200D).
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>REVE</strong> - Vision Transformer with GEGLU and visual
              encoding (22 layers, 512D embedding).
            </Typography>
          </li>
        </Box>
      </Paper>
      <Paper
        elevation={0}
        sx={{
          p: 4,
          mb: 4,
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 2,
        }}
      >
        <Typography variant="h5" sx={{ mb: 2 }}>
          Adapter Methods
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>LoRA</strong> - Low-Rank Adaptation (r=16, alpha=32):
              injects trainable low-rank matrices, ~98% parameter reduction.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>IA3</strong> - Infused Adapter by Inhibiting and
              Amplifying Inner Activations: only learns scaling vectors,
              ~99.5% parameter reduction.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>AdaLoRA</strong> - Adaptive Low-Rank Adaptation: dynamic
              rank allocation across layers for optimal budget distribution.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>DoRA</strong> - Weight-Decomposed Low-Rank Adaptation:
              decomposes weights into magnitude and direction components.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>OFT</strong> - Orthogonal Fine-Tuning: applies
              orthogonal transformations to preserve pre-trained features.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>Probe</strong> - Linear probing baseline: freezes the
              encoder and trains only the classification head.
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>Full Fine-tune</strong> - Updates all model parameters
              (baseline for comparison).
            </Typography>
          </li>
        </Box>
      </Paper>
      <Paper
        elevation={0}
        sx={{
          p: 4,
          mb: 4,
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 2,
        }}
      >
        <Typography variant="h5" sx={{ mb: 2 }}>
          EEG Benchmarks
        </Typography>
        <Typography variant="body1" color="text.secondary" paragraph>
          Models are evaluated across 8 primary downstream datasets spanning
          diverse EEG tasks:
        </Typography>
        <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
          Motor Imagery
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary">
              <strong>BCIC-2a</strong> (BCI Competition IV 2a) - 4-class MI
              (left hand, right hand, feet, tongue), 9 subjects, 22 channels
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>PhysioNet MI</strong> - 4-class MI (left hand, right
              hand, feet, both hands), 109 subjects, 64 channels
            </Typography>
          </li>
        </Box>
        <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
          Sleep Staging
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>ISRUC-SLEEP</strong> (Group I) - 5-class sleep staging
              (W, N1, N2, N3, REM), ~100 subjects, 6 channels, 30s windows
            </Typography>
          </li>
        </Box>
        <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
          Pathology Detection
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary">
              <strong>TUAB</strong> (TUH Abnormal v3.0.1) - Binary (normal /
              abnormal), 290+ subjects, 16 channels, 10s windows
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>TUEV</strong> (TUH Events v2.0.1) - 6-class event
              classification (SPSW, GPED, PLED, EYEM, ARTF, BCKG), 200+
              subjects, 21 channels
            </Typography>
          </li>
        </Box>
        <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
          Seizure Detection
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>CHB-MIT</strong> - Binary seizure detection, 23 pediatric
              subjects, 17 channels, 10s windows
            </Typography>
          </li>
        </Box>
        <Typography variant="h6" sx={{ mt: 2, mb: 1 }}>
          Emotion Recognition
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary">
              <strong>FACED</strong> - 9-class discrete emotion recognition,
              123 subjects, 26 channels
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <strong>SEED-V</strong> - 5-class emotion recognition (Happy,
              Sad, Neutral, Disgust, Fear), 62 channels
            </Typography>
          </li>
        </Box>
      </Paper>
      <Paper
        elevation={0}
        sx={{
          p: 4,
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 2,
        }}
      >
        <Typography variant="h5" sx={{ mb: 2 }}>
          Resources
        </Typography>
        <Box component="ul" sx={{ pl: 3 }}>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <Link
                href="https://braindecode.org"
                target="_blank"
                rel="noopener noreferrer"
              >
                Braindecode
              </Link>{" "}
              - Deep learning toolbox for EEG decoding
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <Link
                href="https://moabb.neurotechx.com"
                target="_blank"
                rel="noopener noreferrer"
              >
                MOABB
              </Link>{" "}
              - Mother of All BCI Benchmarks
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <Link
                href="https://huggingface.co/docs/peft"
                target="_blank"
                rel="noopener noreferrer"
              >
                HuggingFace PEFT
              </Link>{" "}
              - Parameter-Efficient Fine-Tuning library
            </Typography>
          </li>
          <li>
            <Typography variant="body1" color="text.secondary" paragraph>
              <Link
                href="https://github.com/braindecode/braindecode"
                target="_blank"
                rel="noopener noreferrer"
              >
                Braindecode GitHub
              </Link>{" "}
              - Source code and contributions
            </Typography>
          </li>
        </Box>
      </Paper>
      <CitationSection />
    </Box>
  );
}
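// Preformatted citation strings (APA and BibTeX) rendered by CitationSection
// below and copied verbatim to the clipboard by CopyButton.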
const ARENA_APA = `Guetschel, P., Aristimunha, B., Truong, D., Kokate, K., Tangermann, M., & Delorme, A. (2026). Toward OpenEEG-Bench: A live community-driven benchmark for EEG foundation models. In Proceedings of the 34th European Signal Processing Conference (EUSIPCO 2026) (pp. 1–5). EURASIP.`;

const BRAINDECODE_APA = `Aristimunha, B., Guetschel, P., Wimpff, M., Gemein, L., Rommel, C., Banville, H., Sliwowski, M., Wilson, D., Brandt, S., Gnassounou, T., Paillard, J., Junqueira Lopes, B., Sedlar, S., Moreau, T., Chevallier, S., Gramfort, A., & Schirrmeister, R. T. Braindecode: Toolbox for decoding raw electrophysiological brain data with deep learning models [Computer software]. Zenodo. https://doi.org/10.5281/zenodo.17699192`;
const ARENA_BIBTEX = `@inproceedings{guetschel2026openeegbench,
  title = {Toward {OpenEEG-Bench}: A Live Community-Driven Benchmark for {EEG} Foundation Models},
  author = {Guetschel, Pierre and Aristimunha, Bruno and Truong, Dung and Kokate, Kuntal and Tangermann, Michael and Delorme, Arnaud},
  booktitle = {Proceedings of the 34th European Signal Processing Conference (EUSIPCO 2026)},
  year = {2026},
  address = {Bruges, Belgium},
  month = aug,
  pages = {1--5},
  organization = {EURASIP},
  note = {Submitted to EUSIPCO 2026}
}`;
const BRAINDECODE_BIBTEX = `@software{braindecode,
  author = {Aristimunha, Bruno and
            Guetschel, Pierre and
            Wimpff, Martin and
            Gemein, Lukas and
            Rommel, Cedric and
            Banville, Hubert and
            Sliwowski, Maciej and
            Wilson, Daniel and
            Brandt, Simon and
            Gnassounou, Th\\'{e}o and
            Paillard, Joseph and
            {Junqueira Lopes}, Bruna and
            Sedlar, Sara and
            Moreau, Thomas and
            Chevallier, Sylvain and
            Gramfort, Alexandre and
            Schirrmeister, Robin Tibor},
  title = {Braindecode: toolbox for decoding raw electrophysiological brain data
           with deep learning models},
  url = {https://github.com/braindecode/braindecode},
  doi = {10.5281/zenodo.17699192},
  publisher = {Zenodo},
  license = {BSD-3-Clause},
}`;
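/**
 * CopyButton: icon button that copies `text` to the clipboard and briefly
 * switches to a check icon as confirmation. Uses the asynchronous Clipboard
 * API (navigator.clipboard), which is only available in secure contexts
 * (HTTPS or localhost).
 */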
function CopyButton({ text }) {
  const [copied, setCopied] = useState(false);
  const handleCopy = () => {
    navigator.clipboard
      .writeText(text)
      .then(() => {
        setCopied(true);
        setTimeout(() => setCopied(false), 2000);
      })
      .catch(() => {
        // Ignore copy failures (e.g. clipboard access denied); the icon simply stays unchanged.
      });
  };
  return (
    <Tooltip title={copied ? "Copied!" : "Copy citation"}>
      <IconButton size="small" onClick={handleCopy} sx={{ color: "text.secondary" }}>
        {copied ? <CheckIcon fontSize="small" /> : <ContentCopyIcon fontSize="small" />}
      </IconButton>
    </Tooltip>
  );
}
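/**
 * CitationBlock: renders a single citation in two formats, APA (plain text)
 * and BibTeX (preformatted, scrollable), each with its own CopyButton.
 *
 * @param {{ label: string, apa: string, bibtex: string }} props
 */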
function CitationBlock({ label, apa, bibtex }) {
  return (
    <Box sx={{ mb: 4 }}>
      <Typography variant="subtitle2" color="text.secondary" sx={{ mb: 1 }}>
        {label}
      </Typography>
      {/* APA */}
      <Box sx={{ display: "flex", alignItems: "flex-start", justifyContent: "space-between", mb: 0.5 }}>
        <Typography variant="caption" sx={{ fontWeight: 600, color: "text.secondary" }}>
          APA
        </Typography>
        <CopyButton text={apa} />
      </Box>
      <Box
        component="p"
        sx={{
          p: 2,
          bgcolor: "grey.50",
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 1,
          fontSize: "0.85rem",
          lineHeight: 1.6,
          m: 0,
          mb: 2,
        }}
      >
        {apa}
      </Box>
      {/* BibTeX */}
      <Box sx={{ display: "flex", alignItems: "flex-start", justifyContent: "space-between", mb: 0.5 }}>
        <Typography variant="caption" sx={{ fontWeight: 600, color: "text.secondary" }}>
          BibTeX
        </Typography>
        <CopyButton text={bibtex} />
      </Box>
      <Box
        component="pre"
        sx={{
          p: 2,
          bgcolor: "grey.50",
          border: "1px solid",
          borderColor: "grey.200",
          borderRadius: 1,
          overflow: "auto",
          fontSize: "0.8rem",
          lineHeight: 1.5,
          fontFamily: "monospace",
          m: 0,
        }}
      >
        {bibtex}
      </Box>
    </Box>
  );
}
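/**
 * CitationSection: "Citation" card explaining how to cite the leaderboard
 * itself (OpenEEG-Bench) and the underlying Braindecode software.
 */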
function CitationSection() {
  return (
    <Paper
      elevation={0}
      sx={{
        p: 4,
        mt: 4,
        border: "1px solid",
        borderColor: "grey.200",
        borderRadius: 2,
      }}
    >
      <Typography variant="h5" sx={{ mb: 1 }}>
        Citation
      </Typography>
      <Typography variant="body1" color="text.secondary" paragraph>
        If you use OpenEEG-Bench or Braindecode in your research, please cite
        the following:
      </Typography>
      <CitationBlock label="OpenEEG-Bench (this leaderboard)" apa={ARENA_APA} bibtex={ARENA_BIBTEX} />
      <CitationBlock label="Braindecode software" apa={BRAINDECODE_APA} bibtex={BRAINDECODE_BIBTEX} />
    </Paper>
  );
}

export default AboutPage;