aleks-wordcab committed on
Commit
f022e4d
·
verified ·
1 Parent(s): 1d62bdc

Upload ./example_load.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. example_load.py +109 -0
example_load.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Example script for loading preprocessed LibriBrain MEG data.
4
+
5
+ This script demonstrates how to load and use the preprocessed MEG data
6
+ with the pnpl library for training machine learning models.
7
+ """
8
+
9
+ import numpy as np
10
+ from pnpl.datasets import GroupedDataset
11
+ from torch.utils.data import DataLoader
12
+ import torch
13
+
14
def load_preprocessed_data(grouping_level=100, load_to_memory=True, data_dir="data"):
    """
    Load preprocessed LibriBrain MEG data for all three splits.

    Args:
        grouping_level: Number of samples grouped together
            (5, 10, 15, 20, 25, 30, 45, 50, 55, 60, or 100).
        load_to_memory: If True, loads the entire dataset to memory for
            faster access.
        data_dir: Root directory containing the ``grouped_<level>`` folders.
            Defaults to "data", matching the previous hard-coded layout, so
            existing callers are unaffected.

    Returns:
        Tuple of (train_dataset, val_dataset, test_dataset).
    """
    base_path = f"{data_dir}/grouped_{grouping_level}"

    # The three split files differ only by prefix ("<split>_grouped.h5"),
    # so build them in one loop instead of three copy-pasted constructors.
    train_dataset, val_dataset, test_dataset = (
        GroupedDataset(
            preprocessed_path=f"{base_path}/{split}_grouped.h5",
            load_to_memory=load_to_memory,
        )
        for split in ("train", "validation", "test")
    )

    return train_dataset, val_dataset, test_dataset
46
+
47
+
48
def main():
    """Walk through loading, inspecting, and batching the preprocessed data."""
    # Example 1: load the 100-sample-grouped variant fully into memory.
    print("Loading preprocessed MEG data with 100-sample grouping...")
    datasets = load_preprocessed_data(grouping_level=100, load_to_memory=True)
    train_ds, val_ds, test_ds = datasets

    print("Dataset sizes:")
    for split_label, ds in zip(("Train", "Validation", "Test"), datasets):
        print(f" {split_label}: {len(ds)} samples")

    # Example 2: inspect a single sample from the training split.
    first_sample = train_ds[0]
    meg_signals = first_sample['meg']  # MEG signals: (306 channels, time_points)
    phoneme_idx = first_sample['phoneme']  # phoneme class index

    print("\nSample structure:")
    print(f" MEG shape: {meg_signals.shape}")
    print(f" Phoneme label: {phoneme_idx}")

    # Example 3: wrap the training split in a PyTorch DataLoader.
    print("\nCreating PyTorch DataLoader...")
    loader = DataLoader(
        train_ds,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True,  # for GPU training
    )

    # Example 4: peek at the first three batches, then stop.
    print("\nExample batch:")
    for idx, batch in enumerate(loader):
        print(f" Batch {idx}:")
        print(f" MEG batch shape: {batch['meg'].shape}")
        print(f" Phoneme batch shape: {batch['phoneme'].shape}")
        if idx == 2:  # show only the first 3 batches
            break

    # Example 5: guidance on the speed/accuracy trade-off per grouping level.
    print("\n" + "=" * 50)
    guidance = (
        "Available grouping levels:",
        " - grouped_5: Highest fidelity, largest files",
        " - grouped_10: High fidelity",
        " - grouped_20: Good balance",
        " - grouped_50: Faster loading, moderate averaging",
        " - grouped_100: Fastest loading, most averaging",
        "\nChoose based on your requirements:",
        " - For maximum accuracy: use lower grouping (5-20)",
        " - For faster experimentation: use higher grouping (50-100)",
        " - For production models: start with high grouping for prototyping,",
        " then switch to lower grouping for final training",
    )
    print("\n".join(guidance))
106
+
107
+
108
# Script entry point: run the demo only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()