feat: Add dataset card, loading script, and visual assets
#2
by
AshwinKM2005
- opened
- README.md +431 -0
- example_subject_01.png +3 -0
- fmri_fm.py +176 -0
- results.png +3 -0
README.md
ADDED
|
@@ -0,0 +1,431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
pretty_name: "MindEye: fMRI-to-Image Reconstruction Dataset"
|
| 3 |
+
license: mit
|
| 4 |
+
tags:
|
| 5 |
+
- neuroscience
|
| 6 |
+
- fMRI
|
| 7 |
+
- brain-decoding
|
| 8 |
+
- image-reconstruction
|
| 9 |
+
- contrastive-learning
|
| 10 |
+
- diffusion-models
|
| 11 |
+
- CLIP
|
| 12 |
+
- natural-scenes
|
| 13 |
+
task_categories:
|
| 14 |
+
- image-to-image
|
| 15 |
+
- feature-extraction
|
| 16 |
+
size_categories:
|
| 17 |
+
- 10K<n<100K
|
| 18 |
+
|
| 19 |
+
configs:
|
| 20 |
+
- config_name: nsd-brain-trials
|
| 21 |
+
data_files: "datasets/flat-clips/nsd-train-task-clips-16t/*.pt"
|
| 22 |
+
default: true
|
| 23 |
+
dataset_info:
|
| 24 |
+
features:
|
| 25 |
+
- name: subject_id
|
| 26 |
+
dtype: string
|
| 27 |
+
description: "Subject identifier (e.g., 'subj01')"
|
| 28 |
+
- name: session
|
| 29 |
+
dtype: int32
|
| 30 |
+
description: "Scanning session number (1-40)"
|
| 31 |
+
- name: run
|
| 32 |
+
dtype: int32
|
| 33 |
+
description: "fMRI run number within session"
|
| 34 |
+
- name: n_frames
|
| 35 |
+
dtype: int32
|
| 36 |
+
description: "Total frames in the fMRI run (~301)"
|
| 37 |
+
- name: fmri_data
|
| 38 |
+
sequence:
|
| 39 |
+
sequence: float16
|
| 40 |
+
description: "Brain activity tensor"
|
| 41 |
+
- name: start_frame
|
| 42 |
+
dtype: int32
|
| 43 |
+
description: "Frame index where stimulus presentation begins"
|
| 44 |
+
- name: onset_time
|
| 45 |
+
dtype: float32
|
| 46 |
+
description: "Stimulus onset time in seconds"
|
| 47 |
+
- name: duration
|
| 48 |
+
dtype: float32
|
| 49 |
+
description: "Stimulus presentation duration (3.0s)"
|
| 50 |
+
- name: trial_type
|
| 51 |
+
dtype: string
|
| 52 |
+
description: "Always 'nsd' for this dataset"
|
| 53 |
+
- name: nsd_image_id
|
| 54 |
+
dtype: int32
|
| 55 |
+
description: "NSD image identifier (links to COCO dataset)"
|
| 56 |
+
- name: target_class
|
| 57 |
+
dtype: int32
|
| 58 |
+
description: "Target class/category for the stimulus"
|
| 59 |
+
|
| 60 |
+
- config_name: hcp-brain-trials
|
| 61 |
+
data_files: "datasets/flat-clips/hcp-train-task-clips-16t/*.pt"
|
| 62 |
+
dataset_info:
|
| 63 |
+
features:
|
| 64 |
+
- name: subject_id
|
| 65 |
+
dtype: string
|
| 66 |
+
description: "HCP subject identifier (6-digit code)"
|
| 67 |
+
- name: modality
|
| 68 |
+
dtype: string
|
| 69 |
+
description: "Imaging modality ('tfMRI' or 'rfMRI')"
|
| 70 |
+
- name: task
|
| 71 |
+
dtype: string
|
| 72 |
+
description: "HCP task name (e.g., RELATIONAL, SOCIAL, WM, REST1)"
|
| 73 |
+
- name: field_strength
|
| 74 |
+
dtype: string
|
| 75 |
+
description: "Magnetic field strength ('3T' or '7T')"
|
| 76 |
+
- name: phase_encoding
|
| 77 |
+
dtype: string
|
| 78 |
+
description: "Phase encoding direction ('LR', 'RL', 'PA', 'AP')"
|
| 79 |
+
- name: n_frames
|
| 80 |
+
dtype: int32
|
| 81 |
+
description: "Total frames in the fMRI run (126-918)"
|
| 82 |
+
- name: fmri_data
|
| 83 |
+
sequence:
|
| 84 |
+
sequence: float16
|
| 85 |
+
description: "Brain activity tensor"
|
| 86 |
+
- name: start_frame
|
| 87 |
+
dtype: int32
|
| 88 |
+
description: "Frame index where trial begins"
|
| 89 |
+
- name: onset_time
|
| 90 |
+
dtype: float32
|
| 91 |
+
description: "Trial onset time in seconds"
|
| 92 |
+
- name: duration
|
| 93 |
+
dtype: float32
|
| 94 |
+
description: "Trial duration in seconds"
|
| 95 |
+
- name: trial_type
|
| 96 |
+
dtype: string
|
| 97 |
+
description: "Specific trial condition (e.g., relation, mental, story)"
|
| 98 |
+
- name: target_class
|
| 99 |
+
dtype: int32
|
| 100 |
+
description: "Target class mapped via hcp-task-mapping config"
|
| 101 |
+
|
| 102 |
+
- config_name: hcp-flat-archives
|
| 103 |
+
data_files: "datasets/hcp-flat/*.tar"
|
| 104 |
+
|
| 105 |
+
- config_name: clip-image-embeddings
|
| 106 |
+
data_files: "datasets/nsd_clip_embeds.npy"
|
| 107 |
+
dataset_info:
|
| 108 |
+
features:
|
| 109 |
+
- name: image_embeddings
|
| 110 |
+
sequence:
|
| 111 |
+
sequence: float32
|
| 112 |
+
description: "CLIP ViT-L/14 embeddings for 73,000 NSD stimulus images"
|
| 113 |
+
|
| 114 |
+
- config_name: semantic-clusters
|
| 115 |
+
data_files: "datasets/nsd_coco_73k_semantic_cluster_ids.npy"
|
| 116 |
+
dataset_info:
|
| 117 |
+
features:
|
| 118 |
+
- name: cluster_ids
|
| 119 |
+
sequence: int64
|
| 120 |
+
description: "Semantic cluster assignments for COCO/NSD images (range: 0-40)"
|
| 121 |
+
|
| 122 |
+
- config_name: brain-parcellations
|
| 123 |
+
data_files:
|
| 124 |
+
- "datasets/Schaefer2018_400Parcels_7Networks_order.flat.npy"
|
| 125 |
+
- "datasets/Yeo2011_RSFC_7Networks.flat.npy"
|
| 126 |
+
dataset_info:
|
| 127 |
+
features:
|
| 128 |
+
- name: parcellation_type
|
| 129 |
+
dtype: string
|
| 130 |
+
description: "Atlas type ('Schaefer2018_400Parcels_7Networks' or 'Yeo2011_RSFC_7Networks')"
|
| 131 |
+
- name: parcellation_map
|
| 132 |
+
sequence:
|
| 133 |
+
sequence: int64
|
| 134 |
+
description: "Brain region mapping"
|
| 135 |
+
- name: max_regions
|
| 136 |
+
dtype: int32
|
| 137 |
+
description: "Maximum number of regions (400 for Schaefer, 7 for Yeo)"
|
| 138 |
+
|
| 139 |
+
- config_name: hcp-task-mapping
|
| 140 |
+
data_files: "datasets/hcp_trial_type_target_id_map.json"
|
| 141 |
+
dataset_info:
|
| 142 |
+
features:
|
| 143 |
+
- name: trial_type
|
| 144 |
+
dtype: string
|
| 145 |
+
description: "HCP task condition name"
|
| 146 |
+
- name: target_id
|
| 147 |
+
dtype: int32
|
| 148 |
+
description: "Mapped numerical identifier (0-20)"
|
| 149 |
+
|
| 150 |
+
- config_name: hcp-session-metadata
|
| 151 |
+
data_files: "datasets/session_metadata.json"
|
| 152 |
+
dataset_info:
|
| 153 |
+
features:
|
| 154 |
+
- name: session_key
|
| 155 |
+
dtype: string
|
| 156 |
+
description: "Session identifier (e.g., 'sub-349244_mod-tfMRI_task-RELATIONAL_mag-3T_dir-RL')"
|
| 157 |
+
- name: subject_id
|
| 158 |
+
dtype: string
|
| 159 |
+
description: "HCP subject identifier (6-digit code)"
|
| 160 |
+
- name: task
|
| 161 |
+
dtype: string
|
| 162 |
+
description: "HCP task name"
|
| 163 |
+
- name: modality
|
| 164 |
+
dtype: string
|
| 165 |
+
description: "Imaging modality ('tfMRI' or 'rfMRI')"
|
| 166 |
+
- name: field_strength
|
| 167 |
+
dtype: string
|
| 168 |
+
description: "Magnetic field strength ('3T' or '7T')"
|
| 169 |
+
- name: phase_encoding
|
| 170 |
+
dtype: string
|
| 171 |
+
description: "Phase encoding direction ('LR', 'RL', 'PA', 'AP')"
|
| 172 |
+
- name: n_frames
|
| 173 |
+
dtype: int32
|
| 174 |
+
description: "Total frames in session (126-918)"
|
| 175 |
+
- name: n_voxels
|
| 176 |
+
dtype: int32
|
| 177 |
+
description: "Total voxels in session (77,763)"
|
| 178 |
+
---
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# 🧠 MindEye: fMRI-to-Image Reconstruction Dataset
|
| 182 |
+
[](https://arxiv.org/abs/2305.18274)
|
| 183 |
+
[](LICENSE)
|
| 184 |
+
[](https://huggingface.co/datasets/medarc/fmri-fm)
|
| 185 |
+
[](https://discord.gg/tVR4TWnRM9)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
MindEye is a groundbreaking fMRI-to-image dataset that enables state-of-the-art reconstruction and retrieval of viewed natural scene images from human brain activity.
|
| 189 |
+
|
| 190 |
+
- 🎥 Built on the **Natural Scenes Dataset (NSD)**, containing brain responses from **4 participants** who passively viewed **MS-COCO natural scenes** during **7-Tesla fMRI scanning**
|
| 191 |
+
- 🏆 Achieves **>90% accuracy** across multiple reconstruction metrics and **>93% top-1 retrieval accuracy**, marking a major breakthrough in neural decoding
|
| 192 |
+
- 🔗 Maps **fMRI brain activity to CLIP image embeddings** through **specialized contrastive learning frameworks** and **diffusion-based generative models**
|
| 193 |
+
- 🎨 Combines **high-level semantic information** with **low-level perceptual features**, enabling **fine-grained decoding** that can distinguish between **highly similar images** (e.g., different zebras)
|
| 194 |
+
- 🌍 Demonstrates scalability to **billion-image retrieval tasks** using **LAION-5B**, extending its impact to internet-scale benchmarks
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
|
| 199 |
+
**Figure:** Paired side-by-side example — **Left:** MS-COCO stimulus shown during scanning; **Right:** MindEye reconstruction (Subject 01) derived from `fmri_data` + `image_embeddings`.
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
---
|
| 203 |
+
|
| 204 |
+
## 📑 Table of Contents
|
| 205 |
+
- [Quickstart](#quickstart)
|
| 206 |
+
- [Dataset Configurations](#dataset-configurations)
|
| 207 |
+
  - [nsd-brain-trials (Default)](#nsd-brain-trials)
|
| 208 |
+
- [hcp-brain-trials](#hcp-brain-trials)
|
| 209 |
+
- [clip-image-embeddings](#clip-image-embeddings)
|
| 210 |
+
- [hcp-session-metadata](#hcp-session-metadata)
|
| 211 |
+
- [Other Configurations](#other-configurations)
|
| 212 |
+
- [Provenance & Processing](#provenance-processing)
|
| 213 |
+
- [Intended Uses & Limitations](#intended-uses-limitations)
|
| 214 |
+
- [Recommended Uses](#recommended-uses)
|
| 215 |
+
- [Out-of-Scope Uses](#out-of-scope-uses)
|
| 216 |
+
- [Known Limitations](#known-limitations)
|
| 217 |
+
- [Bias & Fairness Considerations](#bias-fairness)
|
| 218 |
+
- [Results](#results)
|
| 219 |
+
- [Dataset Scale](#dataset-scale)
|
| 220 |
+
- [Citation](#citation)
|
| 221 |
+
- [License](#license)
|
| 222 |
+
---
|
| 223 |
+
|
| 224 |
+
<a id="quickstart"></a>
|
| 225 |
+
# 🚀 Quickstart
|
| 226 |
+
|
| 227 |
+
```python
|
| 228 |
+
from datasets import load_dataset
|
| 229 |
+
|
| 230 |
+
# 1. Load the default NSD brain trials (streaming recommended for large configs)
|
| 231 |
+
nsd_ds = load_dataset("medarc/fmri-fm", name="nsd-brain-trials", streaming=True, split="train")
|
| 232 |
+
example = next(iter(nsd_ds))
|
| 233 |
+
|
| 234 |
+
print(f"Subject: {example['subject_id']}, Session: {example['session']}, NSD Image ID: {example['nsd_image_id']}")
|
| 235 |
+
print(f"fMRI Data (first 5 values of first frame): {example['fmri_data'][0][:5]}")
|
| 236 |
+
|
| 237 |
+
# 2. Load the supplementary HCP brain trials (streaming)
|
| 238 |
+
hcp_ds = load_dataset("medarc/fmri-fm", name="hcp-brain-trials", streaming=True, split="train")
|
| 239 |
+
hcp_example = next(iter(hcp_ds))
|
| 240 |
+
|
| 241 |
+
print(f"HCP Subject: {hcp_example['subject_id']}, Task: {hcp_example['task']}")
|
| 242 |
+
|
| 243 |
+
# 3. Load CLIP image embeddings for all NSD stimuli (dense table, non-streaming)
|
| 244 |
+
clip_ds = load_dataset("medarc/fmri-fm", name="clip-image-embeddings", split="train")
|
| 245 |
+
print(f"Total embeddings: {len(clip_ds)}")
|
| 246 |
+
print(f"Embedding vector length (first image): {len(clip_ds[0]['image_embeddings'])}")
|
| 247 |
+
|
| 248 |
+
# 4. Load HCP session metadata
|
| 249 |
+
metadata_ds = load_dataset("medarc/fmri-fm", name="hcp-session-metadata", split="train")
|
| 250 |
+
meta_example = next(iter(metadata_ds))
|
| 251 |
+
|
| 252 |
+
print(f"Example Session Key: {meta_example['session_key']}")
|
| 253 |
+
print(f"Number of voxels in session: {meta_example['n_voxels']}")
|
| 254 |
+
|
| 255 |
+
```
|
| 256 |
+
---
|
| 257 |
+
|
| 258 |
+
<a id="dataset-configurations"></a>
|
| 259 |
+
# 📦 Dataset Configurations
|
| 260 |
+
---
|
| 261 |
+
|
| 262 |
+
<a id="nsd-brain-trials"></a>
|
| 263 |
+
## 🔹 nsd-brain-trials (Default)
|
| 264 |
+
|
| 265 |
+
Contains the primary fMRI signals from the Natural Scenes Dataset, used as the main inputs for the MindEye model.
|
| 266 |
+
|
| 267 |
+
| **Field Name** | **Type** | **Description** |
|
| 268 |
+
|------------------|---------------------------|---------------------------------------------------------------------------------|
|
| 269 |
+
| `subject_id` | `string` | Subject identifier (e.g., "subj01") |
|
| 270 |
+
| `session` | `int32` | Scanning session number (1-40) |
|
| 271 |
+
| `run` | `int32` | fMRI run number within session |
|
| 272 |
+
| `fmri_data` | `sequence[sequence[float16]]` | Brain activity tensor |
|
| 273 |
+
| `nsd_image_id` | `int32` | NSD image identifier (links to COCO dataset) |
|
| 274 |
+
|
| 275 |
+
---
|
| 276 |
+
|
| 277 |
+
<a id="hcp-brain-trials"></a>
|
| 278 |
+
## 🔹 hcp-brain-trials
|
| 279 |
+
Contains supplementary fMRI data from the Human Connectome Project (HCP) for various cognitive tasks.
|
| 280 |
+
|
| 281 |
+
| **Field Name** | **Type** | **Description** |
|
| 282 |
+
|------------------|---------------------------|---------------------------------------------------------------------------------|
|
| 283 |
+
| `subject_id` | `string` | HCP subject identifier (6-digit code) |
|
| 284 |
+
| `modality` | `string` | Imaging modality (`"tfMRI"` or `"rfMRI"`) |
|
| 285 |
+
| `task` | `string` | HCP task name (RELATIONAL, SOCIAL, WM, etc.) |
|
| 286 |
+
| `fmri_data` | `sequence[sequence[float16]]` | Brain activity tensor |
|
| 287 |
+
| `trial_type` | `string` | Specific trial condition (relation, mental, etc.) |
|
| 288 |
+
|
| 289 |
+
---
|
| 290 |
+
|
| 291 |
+
<a id="clip-image-embeddings"></a>
|
| 292 |
+
## 🔹 clip-image-embeddings
|
| 293 |
+
|
| 294 |
+
Contains the target CLIP (ViT-L/14) image embeddings for the 73,000 NSD stimulus images.
|
| 295 |
+
|
| 296 |
+
| **Field Name** | **Type** | **Description** |
|
| 297 |
+
|-----------------------|---------------------------|---------------------------------------------------------------------------------|
|
| 298 |
+
| `image_embeddings` | `sequence[sequence[float32]]` | CLIP ViT-L/14 embeddings for NSD images |
|
| 299 |
+
|
| 300 |
+
---
|
| 301 |
+
<a id="hcp-session-metadata"></a>
|
| 302 |
+
## 🔹 hcp-session-metadata
|
| 303 |
+
Detailed metadata for each fMRI session in the HCP dataset.
|
| 304 |
+
|
| 305 |
+
| **Field Name** | **Type** | **Description** |
|
| 306 |
+
|------------------|------------|-----------------------------------------------------|
|
| 307 |
+
| `session_key` | `string` | Unique session identifier |
|
| 308 |
+
| `subject_id` | `string` | HCP subject identifier |
|
| 309 |
+
| `task` | `string` | HCP task name |
|
| 310 |
+
| `n_voxels` | `int32` | Total voxels in session (77,763) |
|
| 311 |
+
|
| 312 |
+
---
|
| 313 |
+
|
| 314 |
+
<a id="other-configurations"></a>
|
| 315 |
+
## 🔹 Other Configurations
|
| 316 |
+
|
| 317 |
+
- **hcp-flat-archives:** Raw .tar archives of preprocessed fMRI data from the HCP dataset.
|
| 318 |
+
- **semantic-clusters:** Semantic cluster assignments for the 73k COCO/NSD images.
|
| 319 |
+
- **brain-parcellations:** Brain atlas files (Schaefer 2018, Yeo 2011) used for feature engineering.
|
| 320 |
+
- **hcp-task-mapping:** JSON file mapping HCP task conditions to numerical target IDs.
|
| 321 |
+
|
| 322 |
+
---
|
| 323 |
+
|
| 324 |
+
<a id="provenance-processing"></a>
|
| 325 |
+
# 🔬 Provenance & Processing
|
| 326 |
+
|
| 327 |
+
This dataset is constructed from two major sources:
|
| 328 |
+
- **Natural Scenes Dataset (NSD)**
|
| 329 |
+
- **Human Connectome Project (HCP)**
|
| 330 |
+
|
| 331 |
+
The primary neuroimaging data was collected using a **7-Tesla (7T) fMRI scanner**. All fMRI data underwent rigorous preprocessing including:
|
| 332 |
+
- **GLMsingle**: General Linear Model single-trial estimation
|
| 333 |
+
- **Z-scoring**: Session-wise normalization of signals
|
| 334 |
+
- **Brain parcellation**: Feature vectors constructed via atlases such as the **Schaefer 2018 (400 Parcels)** and **Yeo 2011 (7 Networks)**—reducing volumetric brain data into region-wise summaries suitable for machine learning tasks.
|
| 335 |
+
|
| 336 |
+
---
|
| 337 |
+
<a id="intended-uses-limitations"></a>
|
| 338 |
+
# ✅ Intended Uses & Limitations
|
| 339 |
+
|
| 340 |
+
<a id="recommended-uses"></a>
|
| 341 |
+
### Recommended Uses
|
| 342 |
+
|
| 343 |
+
- **Neuroscience Research**: Understanding visual cortex representations, brain-computer interfaces, and neural mechanisms of visual perception
|
| 344 |
+
- **Computer Vision**: Developing novel multimodal learning approaches between brain signals and visual data, contrastive learning research
|
| 345 |
+
- **AI Model Development**: Training and benchmarking brain decoding models, diffusion-based reconstruction systems
|
| 346 |
+
- **Medical Applications**: Research into locked-in syndrome communication, depression assessment through visual bias analysis, neurological disorder diagnosis
|
| 347 |
+
|
| 348 |
+
---
|
| 349 |
+
|
| 350 |
+
<a id="out-of-scope-uses"></a>
|
| 351 |
+
### Out-of-Scope Uses
|
| 352 |
+
|
| 353 |
+
- ⚠️ **Clinical Diagnosis**: Not validated for medical diagnosis without extensive additional clinical validation and regulatory approval
|
| 354 |
+
- ⚠️ **Cross-subject Generalization**: Models are subject-specific and do not generalize across individuals without additional training data
|
| 355 |
+
- ⚠️ **Non-consensual Applications**: Requires active participant compliance; easily defeated by head movement, unrelated thinking, or non-compliance
|
| 356 |
+
---
|
| 357 |
+
### Known Limitations
|
| 358 |
+
|
| 359 |
+
- **Subject Specificity**: Each participant requires individual model training with extensive fMRI data (up to 40 hours scanning)
|
| 360 |
+
- **Compliance Requirement**: Non-invasive neuroimaging requires participant cooperation and cannot be used covertly
|
| 361 |
+
- **Single-trial Degradation**: Performance significantly degrades when using single-trial vs. averaged responses
|
| 362 |
+
---
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
<a id="bias-fairness"></a>
|
| 366 |
+
# ⚖️ Bias & Fairness Considerations
|
| 367 |
+
|
| 368 |
+
- **Sampling Biases**: Limited to 4 participants, all capable of undergoing extensive MRI scanning. Geographic and cultural representation not specified.
|
| 369 |
+
|
| 370 |
+
- **Image Distribution**: Images limited to MS-COCO natural scenes, which may not represent diverse visual experiences across cultures, environments, or individual visual preferences.
|
| 371 |
+
|
| 372 |
+
- **Technical Access**: Requires expensive 7-Tesla MRI equipment and substantial computational resources, limiting accessibility and reproducibility across research groups.
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
<a id="results"></a>
|
| 376 |
+
# 📊 Results
|
| 377 |
+

|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
| **Category** | **Best Prior SOTA** | **MindEye** |
|
| 381 |
+
|-------------------------|---------------------|-------------|
|
| 382 |
+
| Pixel Correlation | 0.254 (Ozcelik) | **0.309** |
|
| 383 |
+
| SSIM | 0.356 (Ozcelik) | **0.323** |
|
| 384 |
+
| Image Retrieval (top-1)| 94.2% (Ozcelik) | **97.8%** |
|
| 385 |
+
| Brain Retrieval (top-1)| 30.3% (Ozcelik) | **90.1%** |
|
| 386 |
+
| CLIP Identification | 91.5% (Ozcelik) | **94.1%** |
|
| 387 |
+
| Parameter Efficiency | 1.45B (Ozcelik LL) | **206M** |
|
| 388 |
+
|
| 389 |
+
<a id="dataset-scale"></a>
|
| 390 |
+
# 📏 Dataset Scale:
|
| 391 |
+
|
| 392 |
+
- **Training samples**: 24,980 across 4 subjects (individual trials preserved)
|
| 393 |
+
- **Test samples**: 982 (averaged across 3 repetitions per image)
|
| 394 |
+
- **Voxels per subject**: 13,000-16,000 from nsdgeneral brain region
|
| 395 |
+
|
| 396 |
+
<a id="citation"></a>
|
| 397 |
+
# 📖 Citation
|
| 398 |
+
Please cite:
|
| 399 |
+
```
|
| 400 |
+
@article{scotti2023reconstructing,
|
| 401 |
+
title={Reconstructing the Mind's Eye: fMRI-to-Image with Contrastive Learning and Diffusion Priors},
|
| 402 |
+
author={Paul S. Scotti and Atmadeep Banerjee and Jimmie Goode and Stepan Shabalin and Alex Nguyen and Ethan Cohen and Aidan J. Dempster and Nathalie Verlinde and Elad Yundler and David Weisberg and Kenneth A. Norman and Tanishq Mathew Abraham},
|
| 403 |
+
journal={arXiv preprint arXiv:2305.18274},
|
| 404 |
+
year={2023},
|
| 405 |
+
  url={https://arxiv.org/abs/2305.18274v2}
|
| 406 |
+
}
|
| 407 |
+
```
|
| 408 |
+
<a id="license"></a>
|
| 409 |
+
# 📜 License
|
| 410 |
+
```
|
| 411 |
+
MIT License
|
| 412 |
+
Copyright (c) 2022 MEDARC
|
| 413 |
+
|
| 414 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 415 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 416 |
+
in the Software without restriction, including without limitation the rights
|
| 417 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 418 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 419 |
+
furnished to do so, subject to the following conditions:
|
| 420 |
+
|
| 421 |
+
The above copyright notice and this permission notice shall be included in all
|
| 422 |
+
copies or substantial portions of the Software.
|
| 423 |
+
|
| 424 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 425 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 426 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 427 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 428 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 429 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 430 |
+
SOFTWARE.
|
| 431 |
+
```
|
example_subject_01.png
ADDED
|
Git LFS Details
|
fmri_fm.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datasets
|
| 2 |
+
import torch
|
| 3 |
+
import numpy as np
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
import tarfile
|
| 7 |
+
|
| 8 |
+
_DESCRIPTION = """
|
| 9 |
+
MindEye is a groundbreaking fMRI-to-image dataset that enables state-of-the-art reconstruction and retrieval of viewed natural scene images from human brain activity. Built on the Natural Scenes Dataset (NSD), this dataset contains brain responses from 4 human participants who passively viewed MS-COCO natural scenes during 7-Tesla fMRI scanning. This repository also includes supplementary data from the Human Connectome Project (HCP).
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
_HOMEPAGE = "https://medarc.ai/mindeye/"
|
| 13 |
+
|
| 14 |
+
_LICENSE = "MIT"
|
| 15 |
+
|
| 16 |
+
_CITATION = """
|
| 17 |
+
@article{scotti2023reconstructing,
|
| 18 |
+
title={Reconstructing the Mind's Eye: fMRI-to-Image with Contrastive Learning and Diffusion Priors},
|
| 19 |
+
author={Paul S. Scotti and Atmadeep Banerjee and Jimmie Goode and Stepan Shabalin and Alex Nguyen and Ethan Cohen and Aidan J. Dempster and Nathalie Verlinde and Elad Yundler and David Weisberg and Kenneth A. Norman and Tanishq Mathew Abraham},
|
| 20 |
+
journal={arXiv preprint arXiv:2305.18274},
|
| 21 |
+
year={2023},
|
| 22 |
+
url={https://arxiv.org/abs/2305.18274v2}
|
| 23 |
+
}
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
class MindEyeFmri(datasets.GeneratorBasedBuilder):
|
| 27 |
+
"""The MindEye fMRI and HCP Foundation Model Dataset."""
|
| 28 |
+
|
| 29 |
+
VERSION = datasets.Version("1.0.0")
|
| 30 |
+
|
| 31 |
+
BUILDER_CONFIGS = [
|
| 32 |
+
datasets.BuilderConfig(name="nsd-brain-trials", version=VERSION, description="fMRI trial data from the Natural Scenes Dataset (NSD)."),
|
| 33 |
+
datasets.BuilderConfig(name="hcp-brain-trials", version=VERSION, description="fMRI trial data from the Human Connectome Project (HCP)."),
|
| 34 |
+
datasets.BuilderConfig(name="hcp-flat-archives", version=VERSION, description="Raw .tar archives of preprocessed HCP fMRI data."),
|
| 35 |
+
datasets.BuilderConfig(name="clip-image-embeddings", version=VERSION, description="CLIP ViT-L/14 embeddings for NSD stimulus images."),
|
| 36 |
+
datasets.BuilderConfig(name="semantic-clusters", version=VERSION, description="Semantic cluster assignments for COCO/NSD images."),
|
| 37 |
+
datasets.BuilderConfig(name="brain-parcellations", version=VERSION, description="Brain atlas files (Schaefer 2018, Yeo 2011) for feature engineering."),
|
| 38 |
+
datasets.BuilderConfig(name="hcp-task-mapping", version=VERSION, description="Mapping from HCP task trial types to numerical target IDs."),
|
| 39 |
+
datasets.BuilderConfig(name="hcp-session-metadata", version=VERSION, description="Session-level metadata for the HCP dataset portion."),
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
DEFAULT_CONFIG_NAME = "nsd-brain-trials"
|
| 43 |
+
|
| 44 |
+
def _info(self):
    """Return the DatasetInfo (feature schema, license, citation) for the active config.

    Each builder config exposes its own feature schema; an unknown config
    name raises ValueError so typos fail fast instead of silently yielding
    an empty dataset.
    """
    if self.config.name == "nsd-brain-trials":
        features = datasets.Features({
            "subject_id": datasets.Value("string"),
            "session": datasets.Value("int32"),
            "run": datasets.Value("int32"),
            "n_frames": datasets.Value("int32"),
            # 2-D brain-activity tensor stored at half precision
            "fmri_data": datasets.Sequence(datasets.Sequence(datasets.Value("float16"))),
            "start_frame": datasets.Value("int32"),
            "onset_time": datasets.Value("float32"),
            "duration": datasets.Value("float32"),
            "trial_type": datasets.Value("string"),
            "nsd_image_id": datasets.Value("int32"),
            "target_class": datasets.Value("int32"),
        })
    elif self.config.name == "hcp-brain-trials":
        features = datasets.Features({
            "subject_id": datasets.Value("string"),
            "modality": datasets.Value("string"),
            "task": datasets.Value("string"),
            "field_strength": datasets.Value("string"),
            "phase_encoding": datasets.Value("string"),
            "n_frames": datasets.Value("int32"),
            "fmri_data": datasets.Sequence(datasets.Sequence(datasets.Value("float16"))),
            "start_frame": datasets.Value("int32"),
            "onset_time": datasets.Value("float32"),
            "duration": datasets.Value("float32"),
            "trial_type": datasets.Value("string"),
            "target_class": datasets.Value("int32"),
        })
    elif self.config.name == "hcp-flat-archives":
        features = datasets.Features({"file_path": datasets.Value("string")})
    elif self.config.name == "clip-image-embeddings":
        features = datasets.Features(
            {"image_embeddings": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))}
        )
    elif self.config.name == "semantic-clusters":
        features = datasets.Features({"cluster_ids": datasets.Sequence(datasets.Value("int64"))})
    elif self.config.name == "brain-parcellations":
        features = datasets.Features({
            "parcellation_type": datasets.Value("string"),
            "parcellation_map": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
            "max_regions": datasets.Value("int32"),
        })
    elif self.config.name == "hcp-task-mapping":
        features = datasets.Features({
            "trial_type": datasets.Value("string"),
            "target_id": datasets.Value("int32"),
        })
    elif self.config.name == "hcp-session-metadata":
        # FIX: renamed "magnet_strength_t" -> "field_strength" and
        # "phase_encoding_dir" -> "phase_encoding" so this schema matches the
        # dataset card and the hcp-brain-trials config above.
        features = datasets.Features({
            "session_key": datasets.Value("string"),
            "subject_id": datasets.Value("string"),
            "task": datasets.Value("string"),
            "modality": datasets.Value("string"),
            "field_strength": datasets.Value("string"),
            "phase_encoding": datasets.Value("string"),
            "n_frames": datasets.Value("int32"),
            "n_voxels": datasets.Value("int32"),
        })
    else:
        raise ValueError(f"Unknown config name: {self.config.name}")

    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        homepage=_HOMEPAGE,
        license=_LICENSE,
        citation=_CITATION,
    )
|
| 89 |
+
|
| 90 |
+
def _split_generators(self, dl_manager):
    """Declare a single TRAIN split rooted at manual_dir (falling back to cwd)."""
    root = dl_manager.manual_dir or "."
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"data_dir": root},
        )
    ]
|
| 93 |
+
|
| 94 |
+
def _generate_examples(self, data_dir):
|
| 95 |
+
config_name = self.config.name
|
| 96 |
+
|
| 97 |
+
if config_name == "nsd-brain-trials":
|
| 98 |
+
pt_dir = os.path.join(data_dir, "datasets/flat-clips/nsd-train-task-clips-16t")
|
| 99 |
+
if not os.path.exists(pt_dir): return
|
| 100 |
+
for i, pt_file in enumerate(sorted(os.listdir(pt_dir))):
|
| 101 |
+
if pt_file.endswith(".pt"):
|
| 102 |
+
file_path = os.path.join(pt_dir, pt_file)
|
| 103 |
+
try:
|
| 104 |
+
data = torch.load(file_path)
|
| 105 |
+
yield i, {
|
| 106 |
+
"subject_id": str(data.get("subject_id", "")), "session": int(data.get("session", -1)), "run": int(data.get("run", -1)),
|
| 107 |
+
"n_frames": int(data.get("n_frames", -1)), "fmri_data": data.get("fmri_data").tolist() if data.get("fmri_data") is not None else [],
|
| 108 |
+
"start_frame": int(data.get("start_frame", -1)), "onset_time": float(data.get("onset_time", -1.0)), "duration": float(data.get("duration", -1.0)),
|
| 109 |
+
"trial_type": str(data.get("trial_type", "")), "nsd_image_id": int(data.get("nsd_image_id", -1)), "target_class": int(data.get("target_class", -1)),
|
| 110 |
+
}
|
| 111 |
+
except Exception as e:
|
| 112 |
+
print(f"Error loading {file_path}: {e}")
|
| 113 |
+
|
| 114 |
+
elif config_name == "hcp-brain-trials":
|
| 115 |
+
pt_dir = os.path.join(data_dir, "datasets/flat-clips/hcp-train-task-clips-16t")
|
| 116 |
+
if not os.path.exists(pt_dir): return
|
| 117 |
+
for i, pt_file in enumerate(sorted(os.listdir(pt_dir))):
|
| 118 |
+
if pt_file.endswith(".pt"):
|
| 119 |
+
file_path = os.path.join(pt_dir, pt_file)
|
| 120 |
+
try:
|
| 121 |
+
data = torch.load(file_path)
|
| 122 |
+
yield i, {
|
| 123 |
+
"subject_id": str(data.get("subject_id", "")), "modality": str(data.get("modality", "")), "task": str(data.get("task", "")),
|
| 124 |
+
"field_strength": str(data.get("field_strength", "")), "phase_encoding": str(data.get("phase_encoding", "")), "n_frames": int(data.get("n_frames", -1)),
|
| 125 |
+
"fmri_data": data.get("fmri_data").tolist() if data.get("fmri_data") is not None else [], "start_frame": int(data.get("start_frame", -1)),
|
| 126 |
+
"onset_time": float(data.get("onset_time", -1.0)), "duration": float(data.get("duration", -1.0)), "trial_type": str(data.get("trial_type", "")),
|
| 127 |
+
"target_class": int(data.get("target_class", -1)),
|
| 128 |
+
}
|
| 129 |
+
except Exception as e:
|
| 130 |
+
print(f"Error loading {file_path}: {e}")
|
| 131 |
+
|
| 132 |
+
elif config_name == "hcp-flat-archives":
|
| 133 |
+
archive_dir = os.path.join(data_dir, "datasets/hcp-flat")
|
| 134 |
+
if not os.path.exists(archive_dir): return
|
| 135 |
+
for i, archive_file in enumerate(sorted(os.listdir(archive_dir))):
|
| 136 |
+
if archive_file.endswith(".tar"):
|
| 137 |
+
yield i, {"file_path": os.path.join(archive_dir, archive_file)}
|
| 138 |
+
|
| 139 |
+
elif config_name == "clip-image-embeddings":
|
| 140 |
+
file_path = os.path.join(data_dir, "datasets/nsd_clip_embeds.npy")
|
| 141 |
+
if not os.path.exists(file_path): return
|
| 142 |
+
embeddings = np.load(file_path, allow_pickle=True)
|
| 143 |
+
yield 0, {"image_embeddings": embeddings.tolist()}
|
| 144 |
+
|
| 145 |
+
elif config_name == "semantic-clusters":
|
| 146 |
+
file_path = os.path.join(data_dir, "datasets/nsd_coco_73k_semantic_cluster_ids.npy")
|
| 147 |
+
if not os.path.exists(file_path): return
|
| 148 |
+
clusters = np.load(file_path)
|
| 149 |
+
yield 0, {"cluster_ids": clusters.tolist()}
|
| 150 |
+
|
| 151 |
+
elif config_name == "brain-parcellations":
|
| 152 |
+
schaefer_path = os.path.join(data_dir, "datasets/Schaefer2018_400Parcels_7Networks_order.flat.npy")
|
| 153 |
+
if os.path.exists(schaefer_path):
|
| 154 |
+
schaefer_map = np.load(schaefer_path)
|
| 155 |
+
yield 0, {"parcellation_type": "Schaefer2018_400Parcels_7Networks", "parcellation_map": schaefer_map.tolist(), "max_regions": 400}
|
| 156 |
+
yeo_path = os.path.join(data_dir, "datasets/Yeo2011_RSFC_7Networks.flat.npy")
|
| 157 |
+
if os.path.exists(yeo_path):
|
| 158 |
+
yeo_map = np.load(yeo_path)
|
| 159 |
+
yield 1, {"parcellation_type": "Yeo2011_RSFC_7Networks", "parcellation_map": yeo_map.tolist(), "max_regions": 7}
|
| 160 |
+
|
| 161 |
+
elif config_name == "hcp-task-mapping":
|
| 162 |
+
file_path = os.path.join(data_dir, "datasets/hcp_trial_type_target_id_map.json")
|
| 163 |
+
if not os.path.exists(file_path): return
|
| 164 |
+
with open(file_path, 'r') as f:
|
| 165 |
+
mapping = json.load(f)
|
| 166 |
+
for i, (key, value) in enumerate(mapping.items()):
|
| 167 |
+
yield i, {"trial_type": key, "target_id": value}
|
| 168 |
+
|
| 169 |
+
elif config_name == "hcp-session-metadata":
|
| 170 |
+
file_path = os.path.join(data_dir, "datasets/session_metadata.json")
|
| 171 |
+
if not os.path.exists(file_path): return
|
| 172 |
+
with open(file_path, 'r') as f:
|
| 173 |
+
metadata = json.load(f)
|
| 174 |
+
for i, item in enumerate(metadata):
|
| 175 |
+
yield i, item
|
| 176 |
+
|
results.png
ADDED
|
Git LFS Details
|