|
|
|
|
|
""" |
|
|
Example script demonstrating how to load and explore the Self-Calibrating BCI Dataset (NeurIPS 2025). |
|
|
|
|
|
This script shows: |
|
|
1. How to open and read the HDF5 file |
|
|
2. How to access the data arrays |
|
|
3. How to read embedded metadata |
|
|
4. Basic data exploration and statistics |
|
|
|
|
|
Requirements: |
|
|
# Using uv (recommended) |
|
|
uv sync |
|
|
uv run python example_load_data.py |
|
|
|
|
|
# Or using pip |
|
|
pip install ... (check pyproject.toml) |
|
|
""" |
|
|
|
|
|
from enum import StrEnum, auto, unique |
|
|
from pathlib import Path |
|
|
|
|
|
import h5py |
|
|
import numpy as np |
|
|
from pydantic import BaseModel, ConfigDict, Field |
|
|
|
|
|
# Project root: the directory containing this script.
_ROOT_PATH = Path(__file__).parent
# Expected dataset location: <root>/data/eeg-net.h5
_DATA_DIR_PATH = _ROOT_PATH / "data"
_DATA_FILE_PATH = _DATA_DIR_PATH / "eeg-net.h5"

# Horizontal rule used by _print_separator for console section headers.
_SEPARATOR = "=" * 60
|
|
|
|
|
|
|
|
class _Data(BaseModel):
    """Container for sample data arrays.

    Immutable (frozen=True) pydantic model holding the three arrays returned
    by _load_sample_data. arbitrary_types_allowed is required so pydantic
    accepts raw numpy arrays as field types.
    """

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    # Arrays sliced from the HDF5 file; rows are samples. Shapes/dtypes are
    # whatever the file provides — not validated here.
    target_faces: np.ndarray = Field(..., description="Target face latent vectors")
    observed_faces: np.ndarray = Field(..., description="Observed face latent vectors")
    eeg_features: np.ndarray = Field(..., description="EEG feature vectors")
|
|
|
|
|
|
|
|
|
|
|
# NOTE(review): model_rebuild() is normally needed only to resolve forward
# references, and _Data has none — this looks like a harmless no-op; confirm
# before removing.
_Data.model_rebuild()
|
|
|
|
|
|
|
|
@unique |
|
|
class _RootMetadataKeys(StrEnum): |
|
|
"""Root-level metadata keys in the HDF5 file.""" |
|
|
|
|
|
TITLE = auto() |
|
|
PAPER_TITLE = auto() |
|
|
AUTHORS = auto() |
|
|
YEAR = auto() |
|
|
CONFERENCE = auto() |
|
|
LICENSE = auto() |
|
|
CONTACT_EMAIL = auto() |
|
|
|
|
|
|
|
|
@unique |
|
|
class _DatasetMetadataKeys(StrEnum): |
|
|
"""Dataset-level metadata keys in the HDF5 file.""" |
|
|
|
|
|
DESCRIPTION = auto() |
|
|
DIMENSIONS = auto() |
|
|
LATENT_DIM = auto() |
|
|
GAN_MODEL = auto() |
|
|
VALUE_RANGE = auto() |
|
|
|
|
|
|
|
|
def _print_separator(title: str = "") -> None:
    """Print a 60-character separator line, optionally with a centered title.

    Args:
        title: Optional title to center between two separator lines. When
            empty, a single separator line is printed.
    """
    if title:
        # Leading newline visually detaches this section from prior output.
        print(f"\n{_SEPARATOR}")
        print(title.center(60))
        print(_SEPARATOR)
    else:
        print(_SEPARATOR)
|
|
|
|
|
|
|
|
def display_dataset_overview(file: h5py.File) -> None:
    """Display basic dataset information: names, sample count, shapes, sizes.

    Args:
        file: Open HDF5 file handle
    """
    _print_separator("Dataset Overview")

    print(f"\nAvailable datasets: {list(file.keys())}")
    # Root attribute "n_samples" is assumed present — a missing key raises
    # KeyError, surfaced by main()'s catch-all handler.
    print(f"Number of samples: {file.attrs['n_samples']}")

    print("\nDataset shapes:")
    # Iterate items() once rather than re-indexing file[key] per attribute.
    for key, dataset in file.items():
        size_mb = dataset.nbytes / (1024**2)
        print(f" {key:20s}: {str(dataset.shape):20s} {str(dataset.dtype):8s} ({size_mb:5.1f} MB)")
|
|
|
|
|
|
|
|
def display_metadata(file: h5py.File, max_length: int = 80) -> None:
    """Display root-level metadata from the HDF5 file.

    Keys absent from the file are skipped silently.

    Args:
        file: Open HDF5 file handle
        max_length: Maximum length for string values before truncation
    """
    _print_separator("Metadata")

    for attr in _RootMetadataKeys:
        attr_value = attr.value
        # Guard clause: only print keys this particular file actually has.
        if attr_value not in file.attrs:
            continue
        value = file.attrs[attr_value]

        # Truncate long strings; 3 characters are reserved for the ellipsis.
        if isinstance(value, str) and len(value) > max_length:
            value = value[: max_length - 3] + "..."
        print(f" {attr_value:20s}: {value}")
|
|
|
|
|
|
|
|
def _load_sample_data(
    file: h5py.File,
    *,
    n_samples: int = 100,
) -> _Data:
    """Load a sample of data for exploration.

    Args:
        file: Open HDF5 file handle
        n_samples: Number of samples to load (default: 100)

    Returns:
        Data container with target_faces, observed_faces, and eeg_features
    """
    _print_separator("Data Exploration")

    print(f"\nLoading first {n_samples} samples for exploration...")
    # Slicing an h5py dataset reads only the requested rows into memory.
    target_faces = file["target_faces"][:n_samples]
    observed_faces = file["observed_faces"][:n_samples]
    # NOTE: the EEG features are stored under the dataset name "eeg_net"
    # (the file itself is eeg-net.h5), not "eeg_features".
    eeg_features = file["eeg_net"][:n_samples]

    print(f" Loaded target_faces: {target_faces.shape}")
    print(f" Loaded observed_faces: {observed_faces.shape}")
    print(f" Loaded eeg_features: {eeg_features.shape}")

    return _Data(
        target_faces=target_faces,
        observed_faces=observed_faces,
        eeg_features=eeg_features,
    )
|
|
|
|
|
|
|
|
def compute_statistics(
    *,
    target_faces: np.ndarray,
    observed_faces: np.ndarray,
    eeg_features: np.ndarray,
) -> None:
    """Compute and display statistics on the sample data.

    Prints per-sample Euclidean distances between target and observed face
    latents, plus summary statistics of the EEG feature values.

    Args:
        target_faces: Target face latent vectors, one row per sample
        observed_faces: Observed face latent vectors, one row per sample
        eeg_features: EEG feature vectors
    """
    # Header reflects the actual number of rows instead of hard-coding "100"
    # (identical output for the current caller, which passes 100 samples).
    _print_separator(f"Data Statistics (first {len(target_faces)} samples)")

    # Per-sample Euclidean distance in latent space (row-wise L2 norm).
    distances = np.linalg.norm(target_faces - observed_faces, axis=1)

    print("\nFace distances (target vs observed):")
    print(f" Mean distance: {distances.mean():.4f}")
    print(f" Median distance: {np.median(distances):.4f}")
    print(f" Std distance: {distances.std():.4f}")
    print(f" Min distance: {distances.min():.4f}")
    print(f" Max distance: {distances.max():.4f}")

    print("\nEEG features statistics:")
    print(f" Mean: {eeg_features.mean():.6f}")
    print(f" Std: {eeg_features.std():.6f}")
    print(f" Min: {eeg_features.min():.6f}")
    print(f" Max: {eeg_features.max():.6f}")
|
|
|
|
|
|
|
|
def _display_dataset_metadata(
    file: h5py.File,
    *,
    dataset_name: str = "target_faces",
    max_length: int = 60,
) -> None:
    """Display metadata for a specific dataset.

    Args:
        file: Open HDF5 file handle
        dataset_name: Name of the dataset to display metadata for
        max_length: Maximum length for string values before truncation.
            Default 60 matches the previously hard-coded limit, so output
            is unchanged for existing callers.
    """
    _print_separator("Dataset-Specific Metadata")

    # e.g. "target_faces" -> "Target faces" for the section heading.
    formatted_name = dataset_name.capitalize().replace("_", " ")
    print(f"\n{formatted_name} metadata:")
    ds = file[dataset_name]

    for key in _DatasetMetadataKeys:
        key_value = key.value
        # Only print keys that this dataset actually carries.
        if key_value not in ds.attrs:
            continue
        value = ds.attrs[key_value]
        # Truncate long strings, reserving 3 characters for the ellipsis
        # (same rule as display_metadata).
        if isinstance(value, str) and len(value) > max_length:
            value = value[: max_length - 3] + "..."
        print(f" {key_value:15s}: {value}")
|
|
|
|
|
|
|
|
def _load_and_explore_dataset(filepath: Path = _DATA_FILE_PATH) -> None:
    """Orchestrate loading and exploring the dataset.

    This function coordinates all the individual display functions to provide
    a complete overview of the dataset.

    Args:
        filepath: Path to the HDF5 data file

    Raises:
        FileNotFoundError: If the data file does not exist (handled by main).
    """
    _print_separator("Self-Calibrating BCI Dataset (NeurIPS 2025)")
    print(f"Loading: {filepath}")

    # Context manager guarantees the HDF5 handle is closed even on error.
    with h5py.File(str(filepath), "r") as f:
        display_dataset_overview(f)
        display_metadata(f)

        data = _load_sample_data(f, n_samples=100)
        compute_statistics(
            target_faces=data.target_faces,
            observed_faces=data.observed_faces,
            eeg_features=data.eeg_features,
        )

        _display_dataset_metadata(f, dataset_name="target_faces")

    _print_separator()
    print("\n✅ Dataset loaded and explored successfully!")
    print()
|
|
|
|
|
|
|
|
def main() -> None:
    """Main entry point with error handling.

    Translates expected failure modes (missing data file, missing packages)
    into actionable console messages; any other exception is reported with
    its traceback rather than crashing with a bare stack dump.
    """
    try:
        _load_and_explore_dataset(_DATA_FILE_PATH)
    except FileNotFoundError:
        print(f"\n❌ Error: {_DATA_FILE_PATH} not found!")
        print("\nPlease ensure the data file is in the correct location.")
    except ImportError as e:
        print(f"\n❌ Error: Missing required package: {e}")
        print("\nPlease install required packages:")
        print(" uv sync (recommended)")
        print(" or: pip install ... (check pyproject.toml)")
    except Exception as e:
        # Broad catch is acceptable at this top-level script boundary only.
        print(f"\n❌ Error: {e}")
        import traceback  # local import: only needed on this error path

        traceback.print_exc()
|
|
|
|
|
|
|
|
# Run the exploration only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
|
|