JayLacoma committed on
Commit
09497a6
·
verified ·
1 Parent(s): e9252d6

Upload 3 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ fsaverage-vol-5mm-src.fif filter=lfs diff=lfs merge=lfs -text
fsaverage-vol-5mm-src.fif ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbec50cf54059c693d4cc6c48cbca92146514d499e6d0cc1f20aaae19b72b731
3
+ size 234419895
ghw280_from_egig.gpsc ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FidNz 103 203 122
2
+ FidT9 25 98 112
3
+ FidT10 176 101 114
4
+ E1 99 194 201
5
+ E2 107 203 186
6
+ E3 114 191 204
7
+ E4 99 181 216
8
+ E5 82 189 203
9
+ E6 89 202 185
10
+ E7 100 208 169
11
+ E8 117 207 170
12
+ E9 125 198 190
13
+ E10 130 185 206
14
+ E11 115 178 218
15
+ E12 100 167 226
16
+ E13 82 176 216
17
+ E14 66 181 204
18
+ E15 70 197 186
19
+ E16 80 206 171
20
+ E17 91 207 149
21
+ E18 109 207 149
22
+ E19 127 205 156
23
+ E20 136 202 174
24
+ E21 142 190 193
25
+ E22 145 173 209
26
+ E23 131 167 221
27
+ E24 118 161 229
28
+ E25 102 149 237
29
+ E26 83 159 229
30
+ E27 66 163 219
31
+ E28 52 169 204
32
+ E29 55 185 188
33
+ E30 60 197 167
34
+ E31 74 204 152
35
+ E32 103 206 129
36
+ E33 155 189 171
37
+ E34 132 147 232
38
+ E35 68 145 231
39
+ E36 46 183 166
40
+ E37 164 160 105
41
+ E38 168 145 85
42
+ E39 171 145 105
43
+ E40 173 149 125
44
+ E41 167 166 129
45
+ E42 149 169 73
46
+ E43 153 151 62
47
+ E44 163 136 68
48
+ E45 174 125 91
49
+ E46 175 124 109
50
+ E47 177 131 127
51
+ E48 178 143 144
52
+ E49 171 161 147
53
+ E50 163 175 149
54
+ E51 147 195 154
55
+ E52 136 180 93
56
+ E53 158 120 53
57
+ E54 179 114 128
58
+ E55 182 125 144
59
+ E56 178 128 199
60
+ E57 178 143 181
61
+ E58 185 125 181
62
+ E59 182 112 203
63
+ E60 169 130 215
64
+ E61 171 145 199
65
+ E62 169 160 187
66
+ E63 176 152 163
67
+ E64 183 135 161
68
+ E65 187 117 161
69
+ E66 187 108 184
70
+ E67 184 95 205
71
+ E68 174 108 219
72
+ E69 157 121 229
73
+ E70 157 144 220
74
+ E71 159 159 207
75
+ E72 158 176 191
76
+ E73 167 169 167
77
+ E74 185 106 144
78
+ E75 189 97 166
79
+ E76 187 87 184
80
+ E77 184 78 207
81
+ E78 176 90 223
82
+ E79 160 100 234
83
+ E80 144 116 238
84
+ E81 146 136 230
85
+ E82 145 156 222
86
+ E83 187 80 167
87
+ E84 160 79 238
88
+ E85 164 58 102
89
+ E86 177 109 95
90
+ E87 173 98 79
91
+ E88 170 83 78
92
+ E89 163 68 85
93
+ E90 156 45 102
94
+ E91 169 57 120
95
+ E92 177 67 135
96
+ E93 169 120 76
97
+ E94 164 103 63
98
+ E95 159 69 67
99
+ E96 155 53 83
100
+ E97 145 34 102
101
+ E98 158 40 122
102
+ E99 169 48 135
103
+ E100 174 51 153
104
+ E101 181 69 151
105
+ E102 148 48 66
106
+ E103 157 32 136
107
+ E104 155 29 206
108
+ E105 168 43 208
109
+ E106 165 34 190
110
+ E107 150 22 191
111
+ E108 140 24 211
112
+ E109 152 38 224
113
+ E110 164 52 225
114
+ E111 177 58 209
115
+ E112 176 49 189
116
+ E113 171 41 171
117
+ E114 158 27 171
118
+ E115 143 17 173
119
+ E116 133 16 194
120
+ E117 123 18 212
121
+ E118 132 31 228
122
+ E119 142 46 238
123
+ E120 153 60 239
124
+ E121 173 71 224
125
+ E122 183 68 187
126
+ E123 179 59 170
127
+ E124 165 36 154
128
+ E125 150 22 156
129
+ E126 132 11 157
130
+ E127 123 10 175
131
+ E128 114 12 196
132
+ E129 102 16 212
133
+ E130 110 25 227
134
+ E131 121 38 239
135
+ E132 129 56 247
136
+ E133 103 7 176
137
+ E134 99 37 240
138
+ E135 103 19 104
139
+ E136 112 14 121
140
+ E137 120 19 101
141
+ E138 104 21 89
142
+ E139 86 20 102
143
+ E140 94 16 121
144
+ E141 103 8 138
145
+ E142 123 12 137
146
+ E143 128 17 120
147
+ E144 133 24 102
148
+ E145 127 27 80
149
+ E146 104 23 76
150
+ E147 82 25 81
151
+ E148 70 26 99
152
+ E149 74 18 118
153
+ E150 82 11 137
154
+ E151 89 5 155
155
+ E152 112 7 156
156
+ E153 141 21 137
157
+ E154 144 27 121
158
+ E155 145 40 81
159
+ E156 131 32 64
160
+ E157 106 26 59
161
+ E158 80 30 57
162
+ E159 65 34 80
163
+ E160 55 37 97
164
+ E161 60 26 116
165
+ E162 62 18 135
166
+ E163 69 10 154
167
+ E164 59 45 61
168
+ E165 48 29 133
169
+ E166 88 79 253
170
+ E167 84 99 250
171
+ E168 102 113 250
172
+ E169 116 102 250
173
+ E170 108 81 254
174
+ E171 77 65 251
175
+ E172 70 82 250
176
+ E173 72 107 246
177
+ E174 85 121 245
178
+ E175 102 131 244
179
+ E176 118 121 245
180
+ E177 132 108 245
181
+ E178 125 85 251
182
+ E179 118 68 252
183
+ E180 108 50 247
184
+ E181 88 50 247
185
+ E182 68 51 243
186
+ E183 58 68 246
187
+ E184 53 88 243
188
+ E185 57 110 238
189
+ E186 70 127 239
190
+ E187 85 142 239
191
+ E188 119 141 239
192
+ E189 131 127 240
193
+ E190 144 94 245
194
+ E191 142 73 247
195
+ E192 37 72 233
196
+ E193 48 27 204
197
+ E194 51 34 220
198
+ E195 64 20 206
199
+ E196 54 19 185
200
+ E197 37 30 184
201
+ E198 35 38 204
202
+ E199 37 48 220
203
+ E200 59 41 234
204
+ E201 68 28 225
205
+ E202 82 17 210
206
+ E203 72 12 189
207
+ E204 61 14 171
208
+ E205 43 25 171
209
+ E206 31 39 166
210
+ E207 26 45 187
211
+ E208 23 54 205
212
+ E209 27 65 220
213
+ E210 47 55 235
214
+ E211 78 39 240
215
+ E212 89 25 228
216
+ E213 93 10 194
217
+ E214 81 8 175
218
+ E215 52 20 153
219
+ E216 40 31 149
220
+ E217 29 47 150
221
+ E218 22 55 166
222
+ E219 18 61 184
223
+ E220 18 70 203
224
+ E221 17 73 161
225
+ E222 32 65 102
226
+ E223 26 57 132
227
+ E224 34 53 115
228
+ E225 44 49 96
229
+ E226 40 64 84
230
+ E227 34 80 76
231
+ E228 30 94 73
232
+ E229 26 101 93
233
+ E230 27 116 108
234
+ E231 23 106 125
235
+ E232 17 100 142
236
+ E233 22 60 147
237
+ E234 35 43 134
238
+ E235 46 39 114
239
+ E236 50 49 79
240
+ E237 47 61 60
241
+ E238 44 100 54
242
+ E239 36 111 69
243
+ E240 30 119 89
244
+ E241 26 128 123
245
+ E242 48 119 50
246
+ E243 24 126 195
247
+ E244 30 139 197
248
+ E245 29 120 210
249
+ E246 18 107 196
250
+ E247 17 116 177
251
+ E248 22 133 179
252
+ E249 30 150 182
253
+ E250 39 153 202
254
+ E251 39 135 214
255
+ E252 42 114 227
256
+ E253 26 102 216
257
+ E254 16 89 199
258
+ E255 14 99 178
259
+ E256 15 111 158
260
+ E257 20 131 159
261
+ E258 27 147 161
262
+ E259 34 162 163
263
+ E260 39 168 186
264
+ E261 53 150 219
265
+ E262 54 130 229
266
+ E263 37 93 230
267
+ E264 24 82 220
268
+ E265 15 81 182
269
+ E266 14 90 159
270
+ E267 21 118 140
271
+ E268 26 137 137
272
+ E269 32 154 142
273
+ E270 39 157 99
274
+ E271 35 138 83
275
+ E272 30 138 104
276
+ E273 29 146 122
277
+ E274 37 163 127
278
+ E275 53 168 72
279
+ E276 50 149 60
280
+ E277 40 129 67
281
+ E278 41 172 147
282
+ E279 53 190 149
283
+ E280 67 181 90
284
+ Cz 101 96 253
lcmv_class.py ADDED
@@ -0,0 +1,576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lcmv_class.py
2
+
3
+ # Imports
4
+ import mne
5
+ import numpy as np
6
+ import pandas as pd
7
+ import nibabel as nib
8
+ from tqdm import tqdm
9
+ import os, time, pickle
10
+ from pathlib import Path
11
+ import matplotlib.pyplot as plt
12
+ from nilearn import datasets, image
13
+
14
+ # Set MNE to only show warnings and errors
15
+ mne.set_log_level('warning')
16
+
17
class LCMVSourceEstimator:
    def __init__(self, config):
        """
        Set up the estimator from a configuration dictionary.

        Parameters:
        config (dict): Must contain 'project_base', 'subject_id' and 'task';
            other keys (e.g. 'ica_file_path', 'gpsc_file_path', 'reg',
            'n_jobs') are consumed later by the pipeline methods.
        """
        self.config = config
        self.project_base = Path(config['project_base'])
        self.subject_id = config['subject_id']
        self.task = config['task']

        # Shared (GLOBAL) directory holding resources common to all
        # subjects, most importantly the fsaverage template.
        self.global_subjects_dir = self.project_base / 'derivatives/lcmv'

        # Per-subject output directory; created eagerly so later saves
        # never fail on a missing folder.
        self.subject_output = (
            self.project_base / f'derivatives/lcmv/{self.subject_id}_{self.task}'
        )
        self.subject_output.mkdir(parents=True, exist_ok=True)
37
+ def parse_gpsc(self, filepath):
38
+ """Parse .gpsc file and normalize coordinates to center the origin."""
39
+ channels = []
40
+ with open(filepath, 'r') as file:
41
+ lines = file.readlines()
42
+ for line in lines:
43
+ parts = line.strip().split()
44
+ if len(parts) < 4:
45
+ continue
46
+ name = parts[0]
47
+ try:
48
+ x, y, z = map(float, parts[1:4])
49
+ channels.append((name, x, y, z))
50
+ except ValueError:
51
+ continue
52
+ return channels
53
+
54
    def run_enhanced_computation(self):
        """Run the complete enhanced LCMV pipeline with improved coregistration.

        Pipeline: load the ICA-cleaned raw recording, build a montage from
        the .gpsc file (coordinates mean-centered and converted to meters),
        coregister to fsaverage (fiducials + ICP, fail-fast with no identity
        fallback), set up a volume source space and forward model, apply an
        LCMV beamformer to the continuous data, and save the source estimate
        plus debug/metadata files under ``self.subject_output``.

        Returns:
            dict: metadata describing the computation (shapes, file paths,
            fiducial positions).

        Raises:
            FileNotFoundError: if the ICA .fif or .gpsc file is missing.
            ValueError: if the .gpsc file yields no channels or lacks fiducials.
            RuntimeError: if coregistration fails.
        """
        print("="*60)
        print(f"🎯 ENHANCED LCMV SOURCE ESTIMATION - Subject: {self.subject_id}")
        print("="*60)

        print("\n=== Loading Data ===")
        ica_file = self.project_base / self.config['ica_file_path']
        gpsc_file = self.project_base / self.config['gpsc_file_path']

        if not ica_file.exists():
            raise FileNotFoundError(f"ICA file not found: {ica_file}")
        if not gpsc_file.exists():
            raise FileNotFoundError(f"GPSC file not found: {gpsc_file}")

        # Load data
        raw = mne.io.read_raw_fif(ica_file, preload=True)
        sfreq = raw.info['sfreq']
        duration_min = raw.n_times / sfreq / 60
        print(f"Data: {duration_min:.1f}min, {sfreq}Hz, {raw.n_times} samples")

        # === ENHANCED PREPROCESSING PIPELINE ===
        print("\n=== Enhanced Preprocessing Pipeline ===")

        # Rename channels to match .gpsc file: numeric names '1'..'280'
        # become 'E1'..'E280', and the recording reference becomes 'Cz'.
        channel_map = {str(i): f'E{i}' for i in range(1, 281)}
        channel_map['REF CZ'] = 'Cz'

        # Only rename existing channels (rename_channels raises on unknowns)
        existing_channels = set(raw.info['ch_names'])
        valid_channel_map = {}
        for old_name, new_name in channel_map.items():
            if old_name in existing_channels:
                valid_channel_map[old_name] = new_name

        if valid_channel_map:
            raw.rename_channels(valid_channel_map)
            print(f"Renamed {len(valid_channel_map)} channels")

        # === ENHANCED MONTAGE CREATION ===
        print("\n=== Creating Enhanced Montage with Coordinate Normalization ===")

        # Parse .gpsc file
        channels = self.parse_gpsc(gpsc_file)

        if not channels:
            raise ValueError("No valid channels found in .gpsc file")

        # Normalize coordinates to center the origin (enhanced method)
        gpsc_array = np.array([ch[1:4] for ch in channels])
        mean_pos = np.mean(gpsc_array, axis=0)
        print(f"Original mean position (mm): {mean_pos}")

        # Normalize (subtract the mean position) and convert mm -> m,
        # since MNE expects head-frame coordinates in meters.
        channels_normalized = [(ch[0], ch[1] - mean_pos[0], ch[2] - mean_pos[1], ch[3] - mean_pos[2])
                               for ch in channels]
        ch_pos = {ch[0]: np.array(ch[1:4]) / 1000.0 for ch in channels_normalized}

        # Check fiducials (required to anchor the montage and coregistration)
        required_fids = ['FidNz', 'FidT9', 'FidT10']
        missing = [fid for fid in required_fids if fid not in ch_pos]
        if missing:
            raise ValueError(f"Missing fiducials: {missing}")

        # Create montage with normalized coordinates
        montage = mne.channels.make_dig_montage(
            ch_pos=ch_pos,
            nasion=ch_pos['FidNz'],
            lpa=ch_pos['FidT9'],
            rpa=ch_pos['FidT10'],
            coord_frame='head'
        )

        # Apply montage and preprocessing
        raw.set_montage(montage, on_missing='warn')
        raw = raw.pick(['eeg', 'stim'], exclude=raw.info['bads'])

        print("\nπŸ” Checking EEG reference status...")
        print(f"custom_ref_applied: {raw.info['custom_ref_applied']}")
        print(f"n_projs: {len(raw.info['projs'])}")
        print(f"proj_applied: {raw.proj}")

        # --- Ensure average reference projection is present ---
        if not any(p['desc'] == 'average' for p in raw.info['projs']):
            print("πŸ“Ž No average reference projection found. Applying it...")
            raw.set_eeg_reference('average', projection=True)
        else:
            print("βœ… Average reference projection already in place.")

        # --- Apply projections if not already applied ---
        if not raw.proj:
            print("🎯 Applying EEG average reference projection...")
            raw.apply_proj()
        else:
            print("πŸ’‘ Projections already applied.")

        print("βœ“ Enhanced preprocessing complete (reference now valid for inverse modeling)")

        print(f"Enhanced montage applied:")
        print(f"FidNz (nasion): {ch_pos['FidNz']}")
        print(f"FidT9 (lpa): {ch_pos['FidT9']}")
        print(f"FidT10 (rpa): {ch_pos['FidT10']}")

        # === SOURCE SPACE SETUP ===
        print("\n=== Source Space Setup ===")
        subject = 'fsaverage'

        # Download fsaverage if needed (shared across subjects in the
        # GLOBAL directory; the 5mm volume source space lives beside it)
        bem_file = self.global_subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-5120-5120-5120-bem-sol.fif'
        bem_head = self.global_subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-head-dense.fif'
        src_file = self.global_subjects_dir / 'fsaverage-vol-5mm-src.fif'

        if not bem_file.exists() or not bem_head.exists():
            print("Downloading fsaverage to GLOBAL directory...")
            mne.datasets.fetch_fsaverage(subjects_dir=self.global_subjects_dir, verbose=False)

        # === ENHANCED COREGISTRATION ===
        print("\n=== Running Enhanced Coregistration ===")
        trans_file = self.subject_output / 'fsaverage-trans.fif'

        try:
            # Initialize coregistration with normalized coordinates
            coreg = mne.coreg.Coregistration(
                raw.info,
                subject=subject,
                subjects_dir=self.global_subjects_dir,
                fiducials={
                    'nasion': ch_pos['FidNz'],
                    'lpa': ch_pos['FidT9'],
                    'rpa': ch_pos['FidT10']
                }
            )

            # Step 1: Fit with fiducials first
            print("1/3: Fitting with fiducials...")
            coreg.fit_fiducials(verbose=False)

            # Step 2: Use EEG channels as head shape points for ICP
            print("2/3: Using EEG channels as head shape points for ICP...")
            coreg.fit_icp(n_iterations=6, nasion_weight=2.0, verbose=False)

            # Remove outliers (points further than 5mm from the scalp)
            print(" Removing outlier points...")
            dists = coreg.compute_dig_mri_distances()
            n_excluded = np.sum(dists > 5.0/1000)

            if n_excluded > 0:
                print(f" Excluding {n_excluded} outlier points (distance > 5mm)")
                coreg.omit_head_shape_points(distance=5.0/1000)
            else:
                print(" No outlier points to exclude")

            # Step 3: Final refinement with higher weight on nasion
            print("3/3: Final ICP refinement...")
            coreg.fit_icp(n_iterations=20, nasion_weight=10.0, verbose=False)

            # Save transformation
            trans = coreg.trans
            mne.write_trans(trans_file, trans, overwrite=True)
            print(f"βœ“ Enhanced coregistration successful: {trans_file}")

            # Compute and display error metrics
            dists = coreg.compute_dig_mri_distances() * 1000  # m -> mm
            mean_err = np.mean(dists)
            median_err = np.median(dists)
            max_err = np.max(dists)

            print(f"\nCoregistration Error (mm):")
            print(f"Mean: {mean_err:.2f}, Median: {median_err:.2f}, Max: {max_err:.2f}")

            if mean_err > 5.0:
                print(f"⚠️ WARNING: Mean error = {mean_err:.2f}mm > 5mm")
            else:
                print("βœ… Enhanced coregistration error acceptable")

        except Exception as e:
            # Identity-transform fallback deliberately removed: fail fast
            # rather than silently produce a mis-registered solution.
            print(f"❌ Coregistration failed irrecoverably: {e}")
            raise RuntimeError(f"Coregistration failed: {e}")

        # === SOURCE SPACE CREATION ===
        print("\n=== Creating Source Space ===")
        if not src_file.exists():
            print("Creating volume source space...")
            # mri='T1.mgz' ensures the source space carries an 'mri_ras_t'
            # transform, which extract_difumo_time_courses requires.
            src = mne.setup_volume_source_space(
                subject=subject,
                subjects_dir=self.global_subjects_dir,
                pos=5.0,
                mri='T1.mgz',
                add_interpolator=True
            )
            src.save(src_file, overwrite=True)
        else:
            src = mne.read_source_spaces(src_file)

        print(f"Source space: {len(src[0]['vertno'])} active sources out of {src[0]['np']} total points")

        # === FORWARD SOLUTION ===
        print("\n=== Creating Forward Solution ===")
        fwd_file = self.subject_output / 'fsaverage-vol-eeg-fwd.fif'
        bem = mne.read_bem_solution(bem_file)
        fwd = mne.make_forward_solution(
            raw.info, trans=trans, src=src, bem=bem, eeg=True, mindist=5.0, n_jobs=self.config['n_jobs']
        )
        mne.write_forward_solution(fwd_file, fwd, overwrite=True)
        print("βœ“ Enhanced source space setup complete")

        # === LCMV BEAMFORMER ===
        print("\n=== LCMV Beamformer ===")

        # Compute a SINGLE covariance from the entire recording; for
        # continuous (non-epoched) data there is no separate noise period.
        print("Computing single covariance from entire recording...")

        cov = mne.compute_raw_covariance(
            raw,
            method='oas',  # shrinkage estimator suited to long continuous data
            picks='eeg',
            rank='info',  # accounts for the rank deficiency from average reference
            n_jobs=self.config['n_jobs'],
            verbose=False)

        # Create LCMV filters: same covariance for data and noise, with
        # explicit rank handling (see note above on average reference).
        print("Creating LCMV spatial filters...")
        filters = mne.beamformer.make_lcmv(
            info=raw.info,
            forward=fwd,
            data_cov=cov,
            noise_cov=cov,  # same matrix - intentional for continuous data
            reg=self.config['reg'],
            pick_ori='max-power',
            weight_norm='unit-noise-gain',
            reduce_rank=True,  # required when the data are rank-deficient
            rank='info',
            verbose=True
        )

        # Apply LCMV to continuous data
        print("Applying LCMV filters to continuous data...")
        stc = mne.beamformer.apply_lcmv_raw(raw=raw, filters=filters)

        # Save STC in H5 format (required for complex data)
        print("Saving STC in H5 format (required for complex data)...")
        stc_file = self.subject_output / 'source_estimate_LCMV.h5'
        stc.save(stc_file, ftype='h5', overwrite=True)
        print(f"βœ“ STC saved successfully in H5 format: {stc_file}")

        print(f"βœ“ LCMV complete: {stc.data.shape} (sources x timepoints)")
        print(f"βœ“ STC file saved as: {stc_file}")

        # === SAVE SOURCE SPACE INFORMATION ===
        print("\n=== Saving source space information ===")

        # For volume source spaces, stc.vertices[0] contains the indices of active sources
        vertices = stc.vertices[0]

        # Get the active source indices from the source space
        active_indices = src[0]['vertno']  # indices of active sources in the full grid

        # Map STC vertices to actual source space positions
        src_points_m = src[0]['rr'][vertices]

        src_points_mm = src_points_m * 1000  # Convert to mm

        # Save the correctly indexed source points
        np.save(self.subject_output / 'source_space_points_mm.npy', src_points_mm)

        # Verify shapes match
        print(f"STC data shape: {stc.data.shape}")
        print(f"Source points shape: {src_points_mm.shape}")

        if src_points_mm.shape[0] != stc.data.shape[0]:
            print(f"WARNING: Shape mismatch detected!")
            n_sources = min(src_points_mm.shape[0], stc.data.shape[0])
            src_points_mm = src_points_mm[:n_sources]
            print(f"Using first {n_sources} source points to match STC data")

        print(f"βœ“ Source space points saved: {src_points_mm.shape} points")
        print(f" Matches STC data shape: {stc.data.shape[0]} sources")

        # === SAVE DEBUG INFO AND METADATA ===
        print("\n=== Saving debug info and metadata ===")

        # Save debugging info; the locals() guards keep this robust even
        # though the fail-fast coregistration means the errors always exist.
        debug_info = {
            'src_vertno': active_indices.tolist(),
            'stc_vertices': vertices.tolist(),
            'src_np': src[0]['np'],
            'n_active_sources': len(active_indices),
            'n_stc_vertices': len(vertices),
            'coregistration_error_mm': {
                'mean': mean_err if 'mean_err' in locals() else None,
                'median': median_err if 'median_err' in locals() else None,
                'max': max_err if 'max_err' in locals() else None
            }
        }
        with open(self.subject_output / 'debug_source_info.pkl', 'wb') as f:
            pickle.dump(debug_info, f)

        # Save metadata
        metadata = {
            'stc_shape': stc.data.shape,
            'n_source_points': len(vertices),
            'source_space_indices': vertices.tolist(),
            'sfreq': sfreq,
            'duration_min': duration_min,
            'stc_file': str(stc_file),
            'src_file': str(src_file),
            'subject_output': str(self.subject_output),
            'global_subjects_dir': str(self.global_subjects_dir),
            'enhanced_coregistration': True,
            'coordinate_normalization': 'mean_centered',
            'fiducials': {
                'FidNz': ch_pos['FidNz'].tolist(),
                'FidT9': ch_pos['FidT9'].tolist(),
                'FidT10': ch_pos['FidT10'].tolist()
            }
        }
        with open(self.subject_output / 'computation_metadata.pkl', 'wb') as f:
            pickle.dump(metadata, f)

        print(f"βœ“ Enhanced computation complete and metadata saved")
        print(f"\nπŸŽ‰ ENHANCED LCMV SOURCE ESTIMATION COMPLETE!")
        print(f" - Enhanced coregistration with error checking")
        print(f" - Proper coordinate normalization")
        print(f" - All original outputs maintained")
        print(f" - Results saved to: {self.subject_output}")

        return metadata
394
    def extract_difumo_time_courses(self, stc, src, config, subject_output):
        """Extract weighted time courses from DiFuMo atlas.

        For each DiFuMo component, the sources falling inside the atlas volume
        are weighted by the component's probabilistic map (weights normalized
        to sum to 1) and averaged into a single time course.

        Parameters:
            stc: volume source estimate (sources x time points).
            src: MNE volume source space that produced ``stc``; must carry an
                'mri_ras_t' transform (see run_enhanced_computation).
            config (dict): needs 'dimension' and 'resolution_mm' for
                nilearn's fetch_atlas_difumo.
            subject_output (str or Path): directory where outputs are saved.

        Returns:
            tuple: (time_courses ndarray of shape (n_components, n_times),
            list of per-component info dicts).
        """
        print("\n=== DiFuMo Processing ===")
        atlas = datasets.fetch_atlas_difumo(
            dimension=config['dimension'],
            resolution_mm=config['resolution_mm']
        )
        atlas_img = nib.load(atlas.maps)
        atlas_shape = atlas_img.shape  # (x, y, z, n_components)
        n_components = atlas_shape[3]

        # Get source locations in mm
        vertices = stc.vertices[0]
        src_rr = src[0]['rr'][vertices] * 1000  # m -> mm

        # Apply MRI RAS transform to get MNI coordinates
        try:
            trans = src[0]['mri_ras_t']['trans']
        except KeyError:
            raise ValueError("Source space missing 'mri_ras_t' transform. Ensure it's a proper volume source space.")

        # NOTE(review): src_rr is passed in mm while 'mri_ras_t' comes from
        # an MNE source space β€” confirm the transform expects mm input here.
        mni_coords = image.coord_transform(src_rr[:, 0], src_rr[:, 1], src_rr[:, 2], trans)
        src_coords_mni = np.array(mni_coords).T  # (n_sources, 3)

        # Convert MNI mm -> voxel indices in atlas space via the inverse affine
        homog = np.column_stack([src_coords_mni, np.ones(len(src_coords_mni))])
        vox_coords = (np.linalg.inv(atlas_img.affine) @ homog.T).T[:, :3]
        vox_coords = np.round(vox_coords).astype(int)

        # Filter valid voxels inside atlas bounds
        valid_mask = (
            (vox_coords >= 0).all(axis=1) &
            (vox_coords[:, 0] < atlas_shape[0]) &
            (vox_coords[:, 1] < atlas_shape[1]) &
            (vox_coords[:, 2] < atlas_shape[2])
        )
        valid_indices = np.where(valid_mask)[0]
        valid_voxels = vox_coords[valid_mask]

        print(f"Using {len(valid_indices)}/{len(vertices)} sources within atlas bounds")

        # Extract time courses
        time_courses = []
        component_info = []
        threshold = 1e-6  # minimum component probability to include a source

        # Handle complex-valued STC (though pick_ori='max-power' should be real)
        if np.iscomplexobj(stc.data):
            print("⚠️ STC is complex β€” taking absolute value for DiFuMo")
            stc.data = np.abs(stc.data)

        for comp_idx in range(n_components):
            if comp_idx % 100 == 0:
                print(f"Processing component {comp_idx + 1}/{n_components}")

            try:
                # slicer avoids loading the whole 4D atlas into memory at once
                comp_map = atlas_img.slicer[..., comp_idx].get_fdata()
                weights, stc_indices = [], []

                for i, (x, y, z) in enumerate(valid_voxels):
                    prob = comp_map[x, y, z]
                    if prob > threshold:
                        weights.append(prob)
                        stc_indices.append(valid_indices[i])

                if weights:
                    weights = np.array(weights)
                    weights /= weights.sum()  # Normalize weights to sum to 1
                    tc = np.average(stc.data[stc_indices], axis=0, weights=weights)
                    info = {
                        'component': comp_idx,
                        'n_sources': len(stc_indices),
                        'max_weight': weights.max(),
                        'mean_weight': weights.mean()
                    }
                else:
                    # No source falls in this component: emit an all-zero trace
                    tc = np.zeros(stc.data.shape[1])
                    info = {
                        'component': comp_idx,
                        'n_sources': 0,
                        'max_weight': 0.0,
                        'mean_weight': 0.0
                    }

                time_courses.append(tc)
                component_info.append(info)

            except Exception as e:
                # Best-effort: a failing component is logged and zero-filled
                # so output shape stays (n_components, n_times).
                print(f"Error in component {comp_idx}: {e}")
                time_courses.append(np.zeros(stc.data.shape[1]))
                component_info.append({
                    'component': comp_idx, 'n_sources': 0, 'max_weight': 0.0, 'mean_weight': 0.0
                })

        # Summary
        valid_comps = sum(1 for info in component_info if info['n_sources'] > 0)
        print(f"βœ… {valid_comps}/{n_components} components have at least one source")

        # Save outputs
        subject_output = Path(subject_output)
        np.save(subject_output / 'difumo_time_courses.npy', np.array(time_courses))
        pd.DataFrame(component_info).to_csv(subject_output / 'difumo_component_info.csv', index=False)
        print(f"πŸ’Ύ Saved to: {subject_output}")

        return np.array(time_courses), component_info
500
+ def run_difumo_extraction(self, difumo_config=None):
501
+ """Run DiFuMo time course extraction on existing data."""
502
+ if difumo_config is None:
503
+ difumo_config = {
504
+ 'dimension': 512,
505
+ 'resolution_mm': 2 # 2mm resolution for 512-component DiFuMo
506
+ }
507
+
508
+ try:
509
+ # --- USER INPUT: UPDATE THESE IF NEEDED ---
510
+ subject_output = self.subject_output
511
+ stc_base_name = "source_estimate_LCMV" # without extension
512
+
513
+ # --- AUTODETECT STC FILE (handles .stc, -vl.stc, .h5) ---
514
+ stc_file = None
515
+ for suffix in ['-vl.stc', '.stc', '.h5']:
516
+ candidate = subject_output / f"{stc_base_name}{suffix}"
517
+ if candidate.exists():
518
+ stc_file = candidate
519
+ break
520
+ if not stc_file:
521
+ raise FileNotFoundError(f"STC file not found in {subject_output}")
522
+
523
+ # --- LOAD DATA ---
524
+ print(f"πŸ” Loading STC: {stc_file}")
525
+ stc = mne.read_source_estimate(stc_file)
526
+ print(f"Loaded STC: {stc.data.shape} (sources Γ— time)")
527
+
528
+ # βœ… FIXED: Use consistent global path (no hardcoded path)
529
+ src_file = self.global_subjects_dir / "fsaverage-vol-5mm-src.fif"
530
+
531
+ print(f"πŸ” Loading source space: {src_file}")
532
+ if not src_file.exists():
533
+ raise FileNotFoundError(f"Source space not found: {src_file}")
534
+ src = mne.read_source_spaces(src_file)
535
+ print(f"Loaded source space with {len(src[0]['vertno'])} active sources")
536
+
537
+ # --- RUN EXTRACTION ---
538
+ time_courses, component_info = self.extract_difumo_time_courses(
539
+ stc=stc,
540
+ src=src,
541
+ config=difumo_config,
542
+ subject_output=subject_output
543
+ )
544
+
545
+ print("\nπŸŽ‰ SUCCESS: DiFuMo time series extraction complete!")
546
+ print(f"πŸ“Š Output shape: {time_courses.shape} (512 components Γ— {time_courses.shape[1]} time points)")
547
+ print(f"πŸ“„ Details saved in:\n - {subject_output / 'difumo_time_courses.npy'}\n - {subject_output / 'difumo_component_info.csv'}")
548
+
549
+ return time_courses, component_info
550
+
551
+ except Exception as e:
552
+ print(f"❌ Error during DiFuMo extraction: {e}")
553
+ raise
554
+
555
+ def list_output_files(self):
556
+ """List all files in the output folder."""
557
+ print(f"\n=== Files in output folder: {self.subject_output} ===")
558
+ for file in os.listdir(self.subject_output):
559
+ print(file)
560
+ return list(os.listdir(self.subject_output))
561
+
562
+
563
+
564
# --- CONFIGURATION ---
# Root of the project tree; all derivatives paths are resolved against it.
PROJECT_BASE = "/home/jaizor/jaizor/xtra"
CROP_BASE_DIR = Path(PROJECT_BASE) / "derivatives/ica"  # ICA-cleaned inputs
GPS_FILE_PATH = "data/ghw280_from_egig.gpsc"  # electrode positions (.gpsc)

# Configuration template
# Per-subject configs are derived from this; LCMVSourceEstimator also reads
# 'subject_id', 'task' and 'ica_file_path', which callers must add.
CONFIG_TEMPLATE = {
    'project_base': PROJECT_BASE,
    'gpsc_file_path': GPS_FILE_PATH,
    'reg': 0.01,  # LCMV regularization passed to make_lcmv
    'n_jobs': -1,  # use all available cores in MNE calls
    'skip_difumo': False
}