"""
EIT Dataset Loader - Direct Python Class (No HuggingFace script loading)

This loader provides direct access to the EIT dataset stored in HDF5 format.
Can be used standalone or wrapped for HuggingFace datasets compatibility.
"""

import h5py
import numpy as np
from pathlib import Path
from typing import Dict, Tuple

import torch
from torch.utils.data import Dataset, DataLoader


class EITDataset(Dataset):
    """
    PyTorch Dataset for EIT (Electrical Impedance Tomography) data.
    
    Args:
        data_dir: Base directory containing the dataset
        subset: Which dataset to load ("CirclesOnly" or "FourObjects")
        split: Which split to load ("train", "val", or "test")
        image_resolution: Image resolution ("32_log", "64_log", "128_log", or "256")
        load_to_memory: If True, load all data to RAM (faster but memory intensive)
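
    Example (assumes a local copy of the dataset, e.g. at "./SimEIT-dataset"):
        >>> ds = EITDataset("./SimEIT-dataset", subset="CirclesOnly", split="train")
        >>> sample = ds[0]
        >>> sample["voltage_measurements"].shape    # 256 measurements (16 electrodes)
        (256,)
        >>> sample["conductivity_map"].shape        # matches image_resolution
        (128, 128)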
    """
    
    def __init__(
        self,
        data_dir: str,
        subset: str = "CirclesOnly",
        split: str = "train",
        image_resolution: str = "128_log",
        load_to_memory: bool = False
    ):
        self.data_dir = Path(data_dir)
        self.subset = subset
        self.split = split
        self.image_resolution = image_resolution
        self.load_to_memory = load_to_memory
        
        # Paths
        self.subset_path = self.data_dir / subset
        self.h5_path = self.subset_path / "dataset.h5"
        
        # Map split name to file name
        split_map = {"train": "train.txt", "val": "val.txt", "test": "test.txt"}
        self.split_file = self.subset_path / "parameters" / split_map[split]
        
        # Load split indices
        self._load_split_indices()
        
        # Load data to memory if requested
        if self.load_to_memory:
            self._load_to_memory()
        else:
            self.cached_data = None
    
    def _load_split_indices(self):
        """Load the indices for this split."""
        with open(self.split_file, 'r') as f:
            self.indices = [int(line.strip()) for line in f if line.strip()]
    
    def _load_to_memory(self):
        """Load all data for this split into memory."""
        print(f"Loading {len(self.indices)} samples to memory...")
        self.cached_data = []
        
        with h5py.File(self.h5_path, "r") as h5_file:
            voltage_data = h5_file["volt"]["16"]
            image_data = h5_file["image"][self.image_resolution]
            
            # Determine graph key
            graph_key = self.image_resolution if self.image_resolution != "256" else "128_log"
            has_graph = graph_key in h5_file["graph"]
            
            for sample_idx in self.indices:
                voltage = voltage_data[:, sample_idx].astype(np.float32)
                image = image_data[:, :, sample_idx].astype(np.float32)
                
                sample = {
                    'voltage_measurements': voltage,
                    'conductivity_map': image,
                    'sample_id': sample_idx
                }
                
                if has_graph:
                    graph = h5_file["graph"][graph_key][:, sample_idx].astype(np.float32)
                    sample['graph_representation'] = graph
                
                self.cached_data.append(sample)
        
        print("Data loaded to memory!")
    
    def __len__(self) -> int:
        return len(self.indices)
    
    def __getitem__(self, idx: int) -> Dict:
        """Get a single sample."""
        if self.cached_data is not None:
            # Return from cached data
            return self.cached_data[idx]
        
        # Read from HDF5 file on-the-fly
        sample_idx = self.indices[idx]
        
        with h5py.File(self.h5_path, "r") as h5_file:
            voltage = h5_file["volt"]["16"][:, sample_idx].astype(np.float32)
            image = h5_file["image"][self.image_resolution][:, :, sample_idx].astype(np.float32)
            
            sample = {
                'voltage_measurements': voltage,
                'conductivity_map': image,
                'sample_id': sample_idx
            }
            
            # Add graph representation if available
            graph_key = self.image_resolution if self.image_resolution != "256" else "128_log"
            if graph_key in h5_file["graph"]:
                graph = h5_file["graph"][graph_key][:, sample_idx].astype(np.float32)
                sample['graph_representation'] = graph
        
        return sample
    
    def get_image_shape(self) -> Tuple[int, int]:
        """Get the shape of conductivity maps."""
        resolution_map = {
            "32_log": (32, 32),
            "64_log": (64, 64),
            "128_log": (128, 128),
            "256": (256, 256)
        }
        return resolution_map.get(self.image_resolution, (128, 128))
    
    def get_statistics(self) -> Dict:
        """Calculate dataset statistics."""
        print("Calculating statistics...")
        image_sum = 0.0
        image_sq_sum = 0.0
        n_samples = len(self)
        
        with h5py.File(self.h5_path, "r") as h5_file:
            voltage_data = h5_file["volt"]["16"]
            image_data = h5_file["image"][self.image_resolution]
            
            # Size the accumulators from the data itself rather than hardcoding
            # 256, so the statistics stay correct for any measurement length.
            voltage_sum = np.zeros(voltage_data.shape[0], dtype=np.float64)
            voltage_sq_sum = np.zeros(voltage_data.shape[0], dtype=np.float64)
            
            for sample_idx in self.indices:
                voltage = voltage_data[:, sample_idx]
                image = image_data[:, :, sample_idx]
                
                voltage_sum += voltage
                voltage_sq_sum += voltage ** 2
                image_sum += np.sum(image)
                image_sq_sum += np.sum(image ** 2)
        
        n_pixels = n_samples * self.get_image_shape()[0] * self.get_image_shape()[1]
        
        stats = {
            'voltage_mean': voltage_sum / n_samples,
            'voltage_std': np.sqrt(voltage_sq_sum / n_samples - (voltage_sum / n_samples) ** 2),
            'image_mean': image_sum / n_pixels,
            'image_std': np.sqrt(image_sq_sum / n_pixels - (image_sum / n_pixels) ** 2),
            'n_samples': n_samples
        }
        
        return stats
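

# The module docstring notes this loader "can be wrapped for HuggingFace datasets
# compatibility" but does not show how. The helper below is a minimal sketch of
# one way to do that, assuming the `datasets` library is installed; the name
# `to_hf_dataset` and the field conversion are illustrative, not part of the
# original API.
def to_hf_dataset(eit_dataset: "EITDataset"):
    """Wrap an EITDataset in a HuggingFace `datasets.Dataset` via a generator."""
    from datasets import Dataset as HFDataset

    def gen():
        for i in range(len(eit_dataset)):
            sample = eit_dataset[i]
            # Convert numpy arrays to lists so Arrow can infer the schema.
            record = {
                "voltage_measurements": sample["voltage_measurements"].tolist(),
                "conductivity_map": sample["conductivity_map"].tolist(),
                "sample_id": int(sample["sample_id"]),
            }
            if "graph_representation" in sample:
                record["graph_representation"] = sample["graph_representation"].tolist()
            yield record

    return HFDataset.from_generator(gen)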


class EITDataModule:
    """
    Convenience class to manage all splits of the EIT dataset.
    
    Args:
        data_dir: Base directory containing the dataset
        subset: Which dataset to load ("CirclesOnly" or "FourObjects")
        image_resolution: Image resolution ("32_log", "64_log", "128_log", or "256")
        batch_size: Batch size for DataLoaders
        num_workers: Number of workers for DataLoaders
        load_to_memory: If True, load all data to RAM
    """
    
    def __init__(
        self,
        data_dir: str,
        subset: str = "CirclesOnly",
        image_resolution: str = "128_log",
        batch_size: int = 32,
        num_workers: int = 4,
        load_to_memory: bool = False
    ):
        self.data_dir = data_dir
        self.subset = subset
        self.image_resolution = image_resolution
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.load_to_memory = load_to_memory
        
        # Create datasets
        self.train_dataset = EITDataset(
            data_dir, subset, "train", image_resolution, load_to_memory
        )
        self.val_dataset = EITDataset(
            data_dir, subset, "val", image_resolution, load_to_memory
        )
        self.test_dataset = EITDataset(
            data_dir, subset, "test", image_resolution, load_to_memory
        )
    
    def train_dataloader(self, **kwargs):
        """Get training DataLoader."""
        return DataLoader(
            self.train_dataset,
            batch_size=kwargs.get('batch_size', self.batch_size),
            shuffle=True,
            num_workers=kwargs.get('num_workers', self.num_workers),
            pin_memory=True
        )
    
    def val_dataloader(self, **kwargs):
        """Get validation DataLoader."""
        return DataLoader(
            self.val_dataset,
            batch_size=kwargs.get('batch_size', self.batch_size),
            shuffle=False,
            num_workers=kwargs.get('num_workers', self.num_workers),
            pin_memory=True
        )
    
    def test_dataloader(self, **kwargs):
        """Get test DataLoader."""
        return DataLoader(
            self.test_dataset,
            batch_size=kwargs.get('batch_size', self.batch_size),
            shuffle=False,
            num_workers=kwargs.get('num_workers', self.num_workers),
            pin_memory=True
        )
    
    def get_statistics(self):
        """Get statistics for all splits."""
        return {
            'train': self.train_dataset.get_statistics(),
            'val': self.val_dataset.get_statistics(),
            'test': self.test_dataset.get_statistics()
        }
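

# A minimal sketch (not part of the original API) of one way the output of
# get_statistics() could be used to z-score normalize batches before training.
# It assumes the batch comes from one of the DataLoaders above, so the fields
# are already torch tensors, and that the per-channel voltage statistics
# broadcast over the batch dimension.
def normalize_batch(batch: Dict, stats: Dict, eps: float = 1e-8):
    """Return normalized (voltage, image) tensors for one batch."""
    v_mean = torch.as_tensor(stats["voltage_mean"], dtype=torch.float32)
    v_std = torch.as_tensor(stats["voltage_std"], dtype=torch.float32)
    voltage = (batch["voltage_measurements"] - v_mean) / (v_std + eps)
    image = (batch["conductivity_map"] - float(stats["image_mean"])) / (
        float(stats["image_std"]) + eps
    )
    return voltage, image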


# Example usage
if __name__ == "__main__":
    print("="*60)
    print("EIT Dataset Loader - Example Usage")
    print("="*60)
    
    # Create dataset. The loader reads local HDF5 files, so data_dir must point
    # to a local copy of the dataset (e.g. downloaded from
    # https://huggingface.co/datasets/AymanAmeen/SimEIT-dataset).
    data_dir = "./SimEIT-dataset"
    
    print("\n1. Creating datasets...")
    train_dataset = EITDataset(
        data_dir=data_dir,
        subset="CirclesOnly",
        split="train",
        image_resolution="128_log",
        load_to_memory=False
    )
    
    print(f"   Train dataset size: {len(train_dataset)}")
    print(f"   Image shape: {train_dataset.get_image_shape()}")
    
    # Get a sample
    print("\n2. Loading a sample...")
    sample = train_dataset[0]
    print(f"   Keys: {list(sample.keys())}")
    print(f"   Voltage measurements shape: {sample['voltage_measurements'].shape}")
    print(f"   Conductivity map shape: {sample['conductivity_map'].shape}")
    if 'graph_representation' in sample:
        print(f"   Graph representation shape: {sample['graph_representation'].shape}")
    print(f"   Sample ID: {sample['sample_id']}")
    
    # Create DataModule
    print("\n3. Creating EITDataModule...")
    data_module = EITDataModule(
        data_dir=data_dir,
        subset="CirclesOnly",
        image_resolution="128_log",
        batch_size=4,
        num_workers=0  # Set to 0 for testing, increase for training
    )
    
    print(f"   Train samples: {len(data_module.train_dataset)}")
    print(f"   Val samples: {len(data_module.val_dataset)}")
    print(f"   Test samples: {len(data_module.test_dataset)}")
    
    # Create DataLoader
    print("\n4. Creating DataLoader and getting a batch...")
    train_loader = data_module.train_dataloader()
    batch = next(iter(train_loader))
    print(f"   Batch voltage shape: {batch['voltage_measurements'].shape}")
    print(f"   Batch image shape: {batch['conductivity_map'].shape}")
    print(f"   Batch IDs: {batch['sample_id'].tolist()}")
    
    # Test different configurations
    print("\n5. Testing different resolutions...")
    for resolution in ["32_log", "64_log", "128_log", "256"]:
        try:
            ds = EITDataset(data_dir, "CirclesOnly", "train", resolution)
            print(f"   {resolution}: {len(ds)} samples, shape: {ds.get_image_shape()}")
        except Exception as e:
            print(f"   {resolution}: Error - {e}")
    
    print("\n" + "="*60)
    print("All tests completed successfully!")
    print("="*60)