| { |
| "@context": { |
| "@language": "en", |
| "@vocab": "https://schema.org/", |
| "sc": "https://schema.org/", |
| "cr": "http://mlcommons.org/croissant/", |
| "rai": "http://mlcommons.org/croissant/RAI/", |
| "dct": "http://purl.org/dc/terms/", |
| "citeAs": "cr:citeAs", |
| "conformsTo": "dct:conformsTo", |
| "containedIn": "cr:containedIn", |
| "data": { |
| "@id": "cr:data", |
| "@type": "@json" |
| }, |
| "dataType": { |
| "@id": "cr:dataType", |
| "@type": "@vocab" |
| }, |
| "distribution": "cr:distribution", |
| "extract": "cr:extract", |
| "field": "cr:field", |
| "fileObject": "cr:fileObject", |
| "fileProperty": "cr:fileProperty", |
| "fileSet": "cr:fileSet", |
| "format": "cr:format", |
| "includes": "cr:includes", |
| "excludes": "cr:excludes", |
| "isArray": "cr:isArray", |
| "isLiveDataset": "cr:isLiveDataset", |
| "jsonPath": "cr:jsonPath", |
| "key": "cr:key", |
| "recordSet": "cr:recordSet", |
| "references": "cr:references", |
| "regex": "cr:regex", |
| "source": "cr:source", |
| "subField": "cr:subField", |
| "transform": "cr:transform" |
| }, |
| "@type": "sc:Dataset", |
| "@id": "https://huggingface.co/datasets/pnpl/LibriBrain2", |
| "name": "LibriBrain2", |
| "alternateName": [ |
| "pnpl/LibriBrain2", |
| "LibriBrain100 component" |
| ], |
| "description": "LibriBrain2 is the second release from the Parker Jones Neural Processing Lab (PNPL) LibriBrain dataset series. Together with the original LibriBrain release, it forms LibriBrain100: a broad-and-deep MEG resource for neural speech decoding. LibriBrain2 extends the original single-subject Sherlock Holmes recordings with additional subject-0 material, including the remaining Sherlock Holmes canon, TIMIT, MOCHA-TIMIT, and The Moth podcast narratives, and adds approximately 40 minutes of Sherlock audiobook MEG recordings from each of 32 additional subjects. The data were collected during passive listening to continuous speech and are intended for reproducible benchmarking of non-invasive neural speech decoding, including word classification, phonetic/semantic analyses, and cross-subject generalisation. Data are planned for release in raw BIDS/FIF form and preprocessed serialised HDF5 form, with paired TSV event files containing time-locked linguistic annotations.", |
| "creator": { |
| "@type": "Organization", |
| "name": "Parker Jones Neural Processing Lab (PNPL)", |
| "url": "https://huggingface.co/pnpl" |
| }, |
| "publisher": { |
| "@type": "Organization", |
| "name": "Parker Jones Neural Processing Lab (PNPL)", |
| "url": "https://huggingface.co/pnpl" |
| }, |
| "maintainer": { |
| "@type": "Organization", |
| "name": "Parker Jones Neural Processing Lab (PNPL)", |
| "url": "https://huggingface.co/pnpl" |
| }, |
| "url": "https://huggingface.co/datasets/pnpl/LibriBrain2", |
| "license": "https://creativecommons.org/licenses/by-nc/4.0/", |
| "isLiveDataset": false, |
| "inLanguage": "en", |
| "keywords": [ |
| "MEG", |
| "magnetoencephalography", |
| "neural speech decoding", |
| "brain-to-text", |
| "non-invasive BCI", |
| "speech perception", |
| "word classification", |
| "phoneme classification", |
| "English", |
| "audio", |
| "BIDS", |
| "FIF", |
| "HDF5", |
| "cc-by-nc-4.0" |
| ], |
| "measurementTechnique": [ |
| "Magnetoencephalography (MEG)", |
| "Passive listening to continuous speech" |
| ], |
| "variableMeasured": [ |
| "MEG sensor time series", |
| "Speech/non-speech event annotations", |
| "Phoneme event annotations", |
| "Word event annotations" |
| ], |
| "spatialCoverage": { |
| "@type": "Place", |
| "name": "Oxford, United Kingdom" |
| }, |
| "conformsTo": "http://mlcommons.org/croissant/1.1", |
| "ethicsPolicy": "Data collection was approved by the University of Oxford Medical Sciences Interdivisional Research Ethics Committee (R90053/RE003). Participants provided informed consent for pseudonymised data sharing.", |
| "distribution": [ |
| { |
| "@type": "cr:FileObject", |
| "@id": "repo", |
| "name": "LibriBrain2 Hugging Face repository", |
| "description": "The Hugging Face dataset repository for LibriBrain2. Replace this with the final repository revision URL if pinning to a specific release.", |
| "contentUrl": "https://huggingface.co/datasets/pnpl/LibriBrain2", |
| "encodingFormat": "git+https" |
| }, |
| { |
| "@type": "cr:FileSet", |
| "@id": "raw-bids-fif-files", |
| "name": "Raw BIDS/FIF MEG files", |
| "description": "Raw MEG recordings in a standard BIDS directory structure using FIF files. Patterns cover BIDS layouts with and without an explicit ses-* level.", |
| "containedIn": { |
| "@id": "repo" |
| }, |
| "encodingFormat": "application/octet-stream", |
| "includes": [ |
| "sub-*/ses-*/meg/*_meg.fif", |
| "sub-*/meg/*_meg.fif" |
| ] |
| }, |
| { |
| "@type": "cr:FileSet", |
| "@id": "preprocessed-hdf5-files", |
| "name": "Preprocessed HDF5 MEG files", |
| "description": "Minimally preprocessed, serialised MEG recordings intended for machine-learning workflows, stored under a BIDS derivatives-style layout. Patterns cover layouts with and without an explicit ses-* level.", |
| "containedIn": { |
| "@id": "repo" |
| }, |
| "encodingFormat": "application/x-hdf5", |
| "includes": [ |
| "derivatives/**/sub-*/ses-*/meg/*.h5", |
| "derivatives/**/sub-*/meg/*.h5", |
| "derivatives/**/sub-*/ses-*/meg/*.hdf5", |
| "derivatives/**/sub-*/meg/*.hdf5" |
| ] |
| }, |
| { |
| "@type": "cr:FileSet", |
| "@id": "event-tsv-files", |
| "name": "Paired linguistic event TSV files", |
| "description": "BIDS-compatible event annotations paired with MEG data files, including speech/non-speech, phoneme, and word-level labels where available. Patterns cover BIDS layouts with and without an explicit ses-* level.", |
| "containedIn": { |
| "@id": "repo" |
| }, |
| "encodingFormat": "text/tab-separated-values", |
| "includes": [ |
| "sub-*/ses-*/meg/*_events.tsv", |
| "sub-*/meg/*_events.tsv", |
| "derivatives/**/sub-*/ses-*/meg/*_events.tsv", |
| "derivatives/**/sub-*/meg/*_events.tsv" |
| ] |
| }, |
| { |
| "@type": "cr:FileSet", |
| "@id": "stimulus-audio-files", |
| "name": "Stimulus audio files", |
| "description": "Stimulus audio files, if redistributed with the dataset, using the conventional BIDS stimuli/ directory. Remove this FileSet if audio is not redistributed in LibriBrain2.", |
| "containedIn": { |
| "@id": "repo" |
| }, |
| "encodingFormat": "audio/wav", |
| "includes": [ |
| "stimuli/**/*.wav", |
| "stimuli/**/*.flac", |
| "stimuli/**/*.mp3" |
| ] |
| }, |
| { |
| "@type": "cr:FileSet", |
| "@id": "metadata-files", |
| "name": "Metadata and split definition files", |
| "description": "Dataset-level metadata, BIDS sidecars, participants metadata, channel/sensor metadata, README files, and split definition files.", |
| "containedIn": { |
| "@id": "repo" |
| }, |
| "encodingFormat": [ |
| "text/plain", |
| "application/json", |
| "text/tab-separated-values", |
| "text/csv" |
| ], |
| "includes": [ |
| "README*", |
| "CHANGES*", |
| "dataset_description.json", |
| "participants.tsv", |
| "participants.json", |
| "sub-*/ses-*/meg/*_channels.tsv", |
| "sub-*/meg/*_channels.tsv", |
| "sub-*/ses-*/meg/*_meg.json", |
| "sub-*/meg/*_meg.json", |
| "sub-*/ses-*/meg/*_coordsystem.json", |
| "sub-*/meg/*_coordsystem.json", |
| "derivatives/**/dataset_description.json", |
| "derivatives/**/*split*.json", |
| "derivatives/**/*split*.csv", |
| "derivatives/**/*split*.tsv" |
| ] |
| } |
| ], |
| "recordSet": [], |
| "rai:dataCollection": "MEG recordings were collected at the Oxford Centre for Human Brain Activity (OHBA) using a MEGIN TRIUX Neo system with 306 SQUID sensors (102 magnetometers and 204 gradiometers) while proficient English-speaking participants listened passively to continuous speech. LibriBrain2 contains the new data that extends the original LibriBrain release into LibriBrain100: additional deep recordings for subject 0 and broad recordings from 32 further subjects. Participants provided informed consent for pseudonymised data sharing.", |
| "rai:dataCollectionType": [ |
| "Experiments", |
| "Direct measurement", |
| "Physical data collection", |
| "Secondary Data analysis", |
| "Manual Human Curator" |
| ], |
| "rai:dataCollectionRawData": "Raw data consist of non-invasive MEG time series acquired during passive listening, paired with stimulus audio and time-locked linguistic event files. Stimuli include Sherlock Holmes LibriVox audiobooks, TIMIT, MOCHA-TIMIT, and The Moth podcast narratives, depending on the LibriBrain2 subset. Raw MEG is planned for release in BIDS/FIF format; ML-ready data are planned for release as minimally preprocessed HDF5.", |
| "rai:dataCollectionMissingData": "TODO: document any missing, excluded, corrupted, or failed sessions; removed/bad channels; incomplete annotations; and whether all expected paired TSV files are present. If there is no known missing data in the final release, replace this with a concise statement to that effect.", |
| "rai:dataPreprocessingProtocol": [ |
| "The serialised HDF5 release is minimally preprocessed for machine-learning use. Planned/expected steps include head-position correction, bad-channel handling, signal-space separation, 50/100 Hz notch filtering, band-pass filtering from 0.1 to 125 Hz, and downsampling from 1 kHz to 250 Hz.", |
| "Raw BIDS/FIF files are intended to preserve flexibility for users who want to apply their own preprocessing pipelines." |
| ], |
    "rai:dataManipulationProtocol": "Participant identifiers are pseudonymised. Text/audio alignments and event annotations may include manual correction or normalisation where stimulus transcripts and audio diverge. No synthetic neural data are included.",
| "rai:dataImputationProtocol": "No imputation of MEG recordings is intended. TODO: confirm whether bad channels are interpolated, marked only, or handled in another way in the preprocessed release.", |
| "rai:dataAnnotationProtocol": "Each MEG data file is paired with TSV event annotations derived from the stimulus audio/transcripts, with time-locked labels for linguistic events such as speech/non-speech, phonemes, and words where available. Annotations are designed to support decoding tasks such as word classification and related speech-decoding benchmarks. TODO: add the final alignment tools, manual correction protocol, label schema, and quality-control checks.", |
| "rai:dataAnnotationPlatform": [ |
| "TODO: add annotation/alignment tools and versions, e.g. forced aligner(s), transcript correction tools, and internal scripts." |
| ], |
| "rai:dataAnnotationAnalysis": "TODO: document annotation validation: alignment accuracy checks, manual review criteria, treatment of ambiguous transcript/audio mismatches, and known annotation error modes.", |
| "rai:machineAnnotationTools": [ |
| "TODO: add forced-alignment tool(s) and version(s)", |
| "TODO: add MEG preprocessing software and version(s), e.g. MaxFilter/MNE if applicable", |
    "Standard splits are supported in the pnpl Python library starting in version 0.1.2"
| ], |
| "rai:annotationsPerItem": "Not a crowdsourced-rating dataset. Each released MEG recording/session is expected to have one paired set of machine-generated and/or manually curated linguistic event annotations.", |
  "rai:annotatorDemographics": "There is no crowd annotator pool; therefore, no annotator demographics were collected or released.",
| "rai:dataUseCases": [ |
| "Training, validation, testing, and fine-tuning of neural speech decoding models.", |
| "Benchmarking non-invasive MEG word classification and related speech-decoding tasks using standard splits.", |
| "Studying within-subject scaling from deep single-subject data.", |
| "Studying data-efficient cross-subject generalisation from limited per-subject MEG recordings.", |
| "Phonetic, articulatory, semantic, and cross-corpus analyses of speech perception signals.", |
| "Development and evaluation of reproducible ML infrastructure for non-invasive brain-computer-interface research." |
| ], |
| "rai:dataLimitations": [ |
| "The recordings are from passive listening to perceived speech, not overt, attempted, or imagined speech; conclusions may not transfer directly to speech-production BCIs.", |
| "The data are MEG recordings from proficient English-speaking participants collected at one site with one scanner type; models may not generalise to other languages, populations, clinical users, recording sites, or modalities such as EEG/fMRI without further validation.", |
| "The dataset is intentionally broad-and-deep but still dominated by a high-data subject-0 component; aggregate model performance may overstate generalisation to low-data subjects.", |
| "Stimuli are English and include a substantial Sherlock Holmes audiobook component, so lexical, semantic, speaker, genre, and accent distributions are not representative of general conversation.", |
| "The preprocessed HDF5 release lowers the barrier for ML users but encodes preprocessing choices that may not be optimal for all analyses; users needing full signal-processing control should use the raw BIDS/FIF release.", |
| "The dataset is intended for research and benchmarking, not for clinical deployment or claims about restoring communication without further clinical validation." |
| ], |
| "rai:dataBiases": [ |
| "Subject distribution is deliberately imbalanced: one subject contributes far more data than other subjects.", |
| "The stimulus distribution is English-only and weighted toward audiobook speech, especially Sherlock Holmes, with additional but still curated TIMIT, MOCHA-TIMIT, and podcast stimuli.", |
| "The broad component uses limited per-subject recording time, so subject-specific estimates may be noisy.", |
| "Participant demographics are not fully represented in this Croissant file; users should not assume demographic representativeness unless documented in the final dataset card/paper.", |
| "Stimulus corpora carry their own speaker, dialect, genre, and corpus-design biases." |
| ], |
| "rai:personalSensitiveInformation": [ |
| "The dataset contains pseudonymised human neuroimaging recordings. Even when direct identifiers are removed, neural data should be treated as sensitive human-subject data.", |
| "Participants provided informed consent for pseudonymised data sharing. Users should not attempt re-identification or linkage to external information.", |
| "Subject-level demographic details should be released only in aggregated or privacy-preserving form consistent with the consent and ethics approval." |
| ], |
| "rai:dataSocialImpact": "The intended positive impact is to accelerate reproducible research on non-invasive neural speech decoding and, ultimately, communication-assistive BCIs. Risks include over-claiming clinical readiness, underestimating generalisation failures, or using neural data outside the consented research context. Users should report limitations, evaluate on standard splits, and avoid clinical or individual-level claims without appropriate validation.", |
| "rai:dataReleaseMaintenancePlan": "The intended release is a versioned, static Hugging Face dataset. Future bug-fix releases will update the dataset version and document changes in the dataset card/changelog.", |
| "rai:conformsTo": "http://mlcommons.org/croissant/RAI/1.0", |
| "version": "0.1.0-draft", |
| "datePublished": "2026-05-05", |
| "citation": "Mantegna, F., Jayalath, D., Elvers, G., Kim, T., Ballyk, B., Fung, A., Cho, S., Kwon, T., Kurth, L., Özdogan, M., Landau, G., Somaiya, P., Voets, N., Woolrich, M., and Parker Jones, O. LibriBrain100: One Hundred Hours of Broad and Deep MEG Data for Neural Speech Decoding at Scale. Dataset paper citation forthcoming.", |
| "rai:hasSyntheticData": false, |
| "rai:syntheticDataDescription": "No synthetic MEG recordings are included. The dataset consists of human MEG recordings collected during passive listening, paired with stimulus-derived linguistic annotations. Some metadata/annotations may be generated or assisted by preprocessing and alignment tools, but the core neural recordings are not synthetic.", |
| "citeAs": "Mantegna, F., Jayalath, D., Elvers, G., Kim, T., Ballyk, B., Fung, A., Cho, S., Kwon, T., Kurth, L., Özdogan, M., Landau, G., Somaiya, P., Voets, N., Woolrich, M., and Parker Jones, O. LibriBrain100: One Hundred Hours of Broad and Deep MEG Data for Neural Speech Decoding at Scale. Dataset paper citation forthcoming." |
| } |
|
|