ctorre committed
Commit 988c261 · verified · 1 Parent(s): 6f6ca36

Upload folder using huggingface_hub

Files changed (8)
  1. .gitignore +216 -0
  2. .python-version +1 -0
  3. LICENSE +22 -0
  4. README.md +124 -0
  5. data/eeg-net.h5 +3 -0
  6. example_load_data.py +280 -0
  7. pyproject.toml +16 -0
  8. uv.lock +0 -0
.gitignore ADDED
@@ -0,0 +1,216 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
# Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
# poetry.lock
# poetry.toml

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
# pdm.lock
# pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
# pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# Redis
*.rdb
*.aof
*.pid

# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/

# ActiveMQ
activemq-data/

# SageMath parsed files
*.sage.py

# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
# .idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/

# Streamlit
.streamlit/secrets.toml
.python-version ADDED
@@ -0,0 +1 @@
3.12
LICENSE ADDED
@@ -0,0 +1,22 @@
MIT License

Copyright (c) 2025 Jonathan Grizou

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md ADDED
@@ -0,0 +1,124 @@
---
license: mit
task_categories:
- other
language:
- en
tags:
- neuroscience
- brain-computer-interfacing
- eeg
- electroencephalography
- gan
- self-calibrating
- mental-imagery
pretty_name: Self-Calibrating BCI Dataset (NeurIPS 2025)
size_categories:
- 1K<n<10K
---

# Self-Calibrating BCI Dataset (NeurIPS 2025)

**Self-Calibrating BCIs: Ranking and Recovery of Mental Targets Without Labels**

This dataset contains brain-computer interface (BCI) data from self-calibrating experiments where participants imagined target faces while EEG signals were recorded. The dataset includes:

- Face representations in GAN latent space
- Processed EEG features from neural networks
- 9,234 experimental trials

## 📊 Dataset Summary

- **Domain**: Neuroscience, Brain-Computer Interfaces
- **Task**: Mental imagery, face recognition, BCI calibration
- **Size**: 9,234 samples, ~39 MB (HDF5 format)
- **Format**: HDF5 (archival-grade, language-agnostic)
- **License**: MIT

## 🎯 Dataset Structure

The data is stored in HDF5 format (`data/eeg-net.h5`) with three main arrays:

| Array            | Shape       | Dtype   | Description                                   |
| ---------------- | ----------- | ------- | --------------------------------------------- |
| `target_faces`   | (9234, 512) | float64 | Target face latent vectors (Progressive GAN)  |
| `observed_faces` | (9234, 512) | float64 | Observed face latent vectors                  |
| `eeg_net`        | (9234, 176) | float32 | Neural network processed EEG features         |

All arrays are aligned: row `i` in each array corresponds to the same experimental trial.
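
As a quick illustration (a minimal sketch assuming only the file path and array names listed above), the same row index selects the same trial in every array, and the distance between the target and observed latents of that row is the per-trial performance measure described under Data Fields:

```python
import h5py
import numpy as np

with h5py.File("data/eeg-net.h5", "r") as f:
    i = 0  # any trial index: row i refers to the same trial in all three arrays
    target = f["target_faces"][i]      # (512,) latent of the face to imagine
    observed = f["observed_faces"][i]  # (512,) latent of the face shown/selected
    features = f["eeg_net"][i]         # (176,) EEG embedding for that trial

    # Euclidean distance between target and observed latents for this trial
    print(np.linalg.norm(target - observed))
```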

### Data Fields

#### `target_faces`

- **Description**: 512-dimensional latent vectors from Progressive GAN representing faces participants were trying to imagine
- **GAN Model**: Progressive GAN trained on CelebA-HQ 1024×1024
- **Value Range**: Approximately [-5, 5] (latent space coordinates)
- **Usage**: Ground truth for BCI task; can be decoded to face images using GAN decoder

#### `observed_faces`

- **Description**: 512-dimensional latent vectors for faces actually presented/selected during trials
- **Relationship**: Distance to `target_faces` measures BCI performance
- **Usage**: Compare with `target_faces` to evaluate mental imagery accuracy

#### `eeg_net`

- **Description**: 176-dimensional learned representations from EEG signals
- **Processing**: Neural network feature extraction from raw EEG data
- **Electrodes**: Derived from 29-channel EEG system
- **Usage**: Input features for BCI decoding models

## 💻 Usage Examples

Run `example_load_data.py` for a full walkthrough of loading and exploring the data; a minimal sketch is shown below.
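
A minimal loading sketch (covering only the first steps of that script; the dataset names and the `n_samples` attribute are the ones the script reads):

```python
import h5py

with h5py.File("data/eeg-net.h5", "r") as f:
    print(list(f.keys()))        # dataset names: target_faces, observed_faces, eeg_net
    print(f.attrs["n_samples"])  # number of trials stored in the file

    # Load the full arrays into memory (~39 MB in total)
    target_faces = f["target_faces"][:]      # (9234, 512) float64
    observed_faces = f["observed_faces"][:]  # (9234, 512) float64
    eeg_net = f["eeg_net"][:]                # (9234, 176) float32
```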

## 📖 Data Collection

**Experimental Setup:**

- Participants imagined target faces while EEG was recorded
- 29-channel EEG system
- Face stimuli generated from Progressive GAN latent space
- Self-calibrating paradigm (no labeled training data)

**Processing Pipeline:**

1. Raw EEG → Windowing & feature extraction → 203 features
2. 203 features → Neural network → 176-dim embeddings (`eeg_net`)
3. Face images → GAN encoder → 512-dim latent vectors

## 📄 Citation

If you use this dataset, please cite:

```bibtex
@article{grizou2025self,
  title={Self-Calibrating BCIs: Ranking and Recovery of Mental Targets Without Labels},
  author={Grizou, Jonathan and de la Torre-Ortiz, Carlos and Ruotsalo, Tuukka},
  journal={Advances in Neural Information Processing Systems},
  year={2025}
}
```

## 📜 License

This dataset is released under the MIT License.

## 🔗 Related Resources

- **Paper Repository**: [github.com/jgrizou/neurips-self-calibrating-bci](https://github.com/jgrizou/neurips-self-calibrating-bci)
- **GAN Checkpoint**: Progressive GAN for CelebA-HQ 1024×1024
- **Contact**: jonathan.grizou@grizai.com

## 🤝 Contributions

This dataset was created as part of the NeurIPS 2025 paper. For questions, issues, or suggestions, please contact jonathan.grizou@grizai.com or open an issue on the paper repository.

## 🙏 Acknowledgments

Jonathan Grizou conducted this work during his tenure as an Assistant Professor at the University of Glasgow and subsequently through GrizAI Ltd. We gratefully acknowledge the financial support of both organizations.

This research was partially funded by the Alfred Kordelin Foundation (grant 230099) and the Finnish Foundation for Technology Promotion (grant 10168).

Computing and storage resources were provided by the Finnish Computing Competence Infrastructure (FCCI; HILE ERC grant ILLUMINATOR, 101114623).
data/eeg-net.h5 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82cddd25951e742a1bc8596ccc53245b8b3530c68d870b3308d512ebc13618b0
size 40673742
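
Note: the HDF5 file itself is stored through Git LFS (only the pointer above lives in the Git history), so a clone without LFS support fetches just this stub. Alternatively, the file can be retrieved with `huggingface_hub`, which is already a project dependency. The sketch below is illustrative only; the repo id is a placeholder, not the real identifier:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id: substitute the actual dataset id on the Hugging Face Hub.
local_path = hf_hub_download(
    repo_id="<user>/<dataset-name>",
    filename="data/eeg-net.h5",
    repo_type="dataset",
)
print(local_path)  # path to the cached copy of the HDF5 file
```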
example_load_data.py ADDED
@@ -0,0 +1,280 @@
#!/usr/bin/env python3
"""
Example script demonstrating how to load and explore the Self-Calibrating BCI Dataset (NeurIPS 2025).

This script shows:
1. How to open and read the HDF5 file
2. How to access the data arrays
3. How to read embedded metadata
4. Basic data exploration and statistics

Requirements:
    # Using uv (recommended)
    uv sync
    uv run python example_load_data.py

    # Or using pip
    pip install ... (check pyproject.toml)
"""

from enum import StrEnum, auto, unique
from pathlib import Path

import h5py
import numpy as np
from pydantic import BaseModel, ConfigDict, Field

_ROOT_PATH = Path(__file__).parent
_DATA_DIR_PATH = _ROOT_PATH / "data"
_DATA_FILE_PATH = _DATA_DIR_PATH / "eeg-net.h5"

_SEPARATOR = "=" * 60


class _Data(BaseModel):
    """Container for sample data arrays."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    target_faces: np.ndarray = Field(..., description="Target face latent vectors")
    observed_faces: np.ndarray = Field(..., description="Observed face latent vectors")
    eeg_features: np.ndarray = Field(..., description="EEG feature vectors")


# Rebuild model to handle forward references
_Data.model_rebuild()


@unique
class _RootMetadataKeys(StrEnum):
    """Root-level metadata keys in the HDF5 file."""

    TITLE = auto()
    PAPER_TITLE = auto()
    AUTHORS = auto()
    YEAR = auto()
    CONFERENCE = auto()
    LICENSE = auto()
    CONTACT_EMAIL = auto()


@unique
class _DatasetMetadataKeys(StrEnum):
    """Dataset-level metadata keys in the HDF5 file."""

    DESCRIPTION = auto()
    DIMENSIONS = auto()
    LATENT_DIM = auto()
    GAN_MODEL = auto()
    VALUE_RANGE = auto()


def _print_separator(title: str = "") -> None:
    """Print a formatted separator line.

    Args:
        title: Optional title to center in the separator
    """
    if title:
        print("\n{}".format(_SEPARATOR))
        print("{}".format(title).center(60))
        print("{}".format(_SEPARATOR))
    else:
        print("{}".format(_SEPARATOR))


def display_dataset_overview(file: h5py.File) -> None:
    """Display basic dataset information.

    Args:
        file: Open HDF5 file handle
    """
    _print_separator("Dataset Overview")

    print("\nAvailable datasets: {}".format(list(file.keys())))
    print("Number of samples: {}".format(file.attrs["n_samples"]))

    # Show dataset shapes
    print("\nDataset shapes:")
    for key in file.keys():
        shape = file[key].shape
        dtype = str(file[key].dtype)
        size_mb = file[key].nbytes / (1024**2)
        print(
            " {:20s}: {:20s} {:8s} ({:5.1f} MB)".format(
                key,
                str(shape),
                dtype,
                size_mb,
            )
        )


def display_metadata(file: h5py.File, max_length: int = 80) -> None:
    """Display root-level metadata from the HDF5 file.

    Args:
        file: Open HDF5 file handle
        max_length: Maximum length for string values before truncation
    """
    _print_separator("Metadata")

    for attr in _RootMetadataKeys:
        attr_value = attr.value
        if attr_value in file.attrs:
            value = file.attrs[attr_value]
            # Truncate long values
            if isinstance(value, str) and len(value) > max_length:
                value = value[: max_length - 3] + "..."
            print(" {:20s}: {}".format(attr_value, value))


def _load_sample_data(
    file: h5py.File,
    *,
    n_samples: int = 100,
) -> _Data:
    """Load a sample of data for exploration.

    Args:
        file: Open HDF5 file handle
        n_samples: Number of samples to load (default: 100)

    Returns:
        Data container with target_faces, observed_faces, and eeg_features
    """
    _print_separator("Data Exploration")

    print("\nLoading first {} samples for exploration...".format(n_samples))
    target_faces = file["target_faces"][:n_samples]
    observed_faces = file["observed_faces"][:n_samples]
    eeg_features = file["eeg_net"][:n_samples]

    print(" Loaded target_faces: {}".format(target_faces.shape))
    print(" Loaded observed_faces: {}".format(observed_faces.shape))
    print(" Loaded eeg_features: {}".format(eeg_features.shape))

    return _Data(
        target_faces=target_faces,
        observed_faces=observed_faces,
        eeg_features=eeg_features,
    )


def compute_statistics(
    *,
    target_faces: np.ndarray,
    observed_faces: np.ndarray,
    eeg_features: np.ndarray,
) -> None:
    """Compute and display statistics on the sample data.

    Args:
        target_faces: Target face latent vectors
        observed_faces: Observed face latent vectors
        eeg_features: EEG feature vectors
    """
    _print_separator("Data Statistics (first 100 samples)")

    # Face distances (BCI performance metric)
    distances = np.linalg.norm(target_faces - observed_faces, axis=1)

    print("\nFace distances (target vs observed):")
    print(" Mean distance: {:.4f}".format(distances.mean()))
    print(" Median distance: {:.4f}".format(np.median(distances)))
    print(" Std distance: {:.4f}".format(distances.std()))
    print(" Min distance: {:.4f}".format(distances.min()))
    print(" Max distance: {:.4f}".format(distances.max()))

    # EEG feature statistics
    print("\nEEG features statistics:")
    print(" Mean: {:.6f}".format(eeg_features.mean()))
    print(" Std: {:.6f}".format(eeg_features.std()))
    print(" Min: {:.6f}".format(eeg_features.min()))
    print(" Max: {:.6f}".format(eeg_features.max()))


def _display_dataset_metadata(
    file: h5py.File,
    *,
    dataset_name: str = "target_faces",
) -> None:
    """Display metadata for a specific dataset.

    Args:
        file: Open HDF5 file handle
        dataset_name: Name of the dataset to display metadata for
    """
    _print_separator("Dataset-Specific Metadata")

    formatted_name = dataset_name.capitalize().replace("_", " ")
    print("\n{} metadata:".format(formatted_name))
    ds = file[dataset_name]

    for key in _DatasetMetadataKeys:
        key_value = key.value
        if key_value in ds.attrs:
            value = ds.attrs[key_value]
            if isinstance(value, str) and len(value) > 60:
                value = value[:57] + "..."
            print(" {:15s}: {}".format(key_value, value))


def _load_and_explore_dataset(filepath: Path = _DATA_FILE_PATH) -> None:
    """Orchestrate loading and exploring the dataset.

    This function coordinates all the individual display functions to provide
    a complete overview of the dataset.

    Args:
        filepath: Path to the HDF5 data file
    """
    _print_separator("Self-Calibrating BCI Dataset (NeurIPS 2025)")
    print("Loading: {}".format(filepath))

    with h5py.File(str(filepath), "r") as f:
        # Display basic information
        display_dataset_overview(f)

        # Display metadata
        display_metadata(f)

        # Load sample data
        data = _load_sample_data(f, n_samples=100)

        # Compute and display statistics
        compute_statistics(
            target_faces=data.target_faces,
            observed_faces=data.observed_faces,
            eeg_features=data.eeg_features,
        )

        # Display dataset-specific metadata
        _display_dataset_metadata(f, dataset_name="target_faces")

    _print_separator()
    print("\n✅ Dataset loaded and explored successfully!")
    print()


def main() -> None:
    """Main entry point with error handling."""
    try:
        _load_and_explore_dataset(_DATA_FILE_PATH)
    except FileNotFoundError:
        print("\n❌ Error: {} not found!".format(_DATA_FILE_PATH))
        print("\nPlease ensure the data file is in the correct location.")
    except ImportError as e:
        print("\n❌ Error: Missing required package: {}".format(e))
        print("\nPlease install required packages:")
        print(" uv sync (recommended)")
        print(" or: pip install ... (check pyproject.toml)")
    except Exception as e:
        print("\n❌ Error: {}".format(e))
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()
pyproject.toml ADDED
@@ -0,0 +1,16 @@
[project]
name = "self-calibrating-bci"
version = "0.1.0"
description = "Hugging Face data repository for Self-calibrating BCI project (NeurIPS 2025)"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "huggingface-hub>=0.20.0",
    "numpy>=1.19.0",
    "pandas>=1.3.0",
    "pyarrow>=10.0.0",
    "tqdm>=4.65.0",
    "click>=8.0.0",
    "h5py>=3.10.0",
    "pydantic>=2.12.4",
]
uv.lock ADDED
The diff for this file is too large to render. See raw diff