basavyr committed on
Commit
a15bf28
·
1 Parent(s): 0519dc2

create pytorch dataloader from parquet files

Browse files
.gitignore CHANGED
@@ -30,3 +30,4 @@ cache
30
  # SLURM
31
  jobs
32
  slurm-*
 
 
30
  # SLURM
31
  jobs
32
  slurm-*
33
+ .python-version
scripts/debug_structure.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Debug parquet data structure.
3
+ """
4
+
5
+ import pandas as pd
6
+ from pathlib import Path
7
+ import json
8
+
9
def debug_structure(parquet_path=None):
    """Print a summary of one parquet shard's on-disk structure.

    Reports the DataFrame shape, the column names, the Python type of
    each column value in the first row, and the layout of the 'image'
    column (dict keys, raw-bytes length, or a truncated preview) so the
    dataset format can be verified before building a DataLoader.

    Args:
        parquet_path: Path (str or Path) to a parquet file to inspect.
            Defaults to ``data/train-00000-of-00017.parquet``, matching
            the original hard-coded behavior.
    """
    if parquet_path is None:
        # Backward-compatible default: the first training shard.
        parquet_path = Path("data") / "train-00000-of-00017.parquet"
    parquet_file = Path(parquet_path)

    df = pd.read_parquet(parquet_file)
    print(f"DataFrame shape: {df.shape}")
    print(f"Columns: {list(df.columns)}")

    # Check the first sample to learn each column's value type.
    first_row = df.iloc[0]
    print("\nFirst row data types:")
    for col in df.columns:
        print(f" {col}: {type(first_row[col])}")

    # The 'image' column may be a dict (HF datasets style), raw bytes,
    # or something else entirely -- report whichever case applies.
    image_data = first_row['image']
    print(f"\nImage data type: {type(image_data)}")
    if isinstance(image_data, dict):
        print(f"Image dict keys: {list(image_data.keys())}")
        for key, value in image_data.items():
            # Truncate values so huge byte blobs stay readable.
            print(f" {key}: {type(value)} - {str(value)[:100]}...")
    elif isinstance(image_data, bytes):
        print(f"Image bytes length: {len(image_data)}")
    else:
        print(f"Image data: {str(image_data)[:200]}...")


if __name__ == "__main__":
    debug_structure()
scripts/pytorch_dataloader.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pandas as pd
3
+ from torch.utils.data import Dataset, DataLoader
4
+ from PIL import Image
5
+ import io
6
+ from pathlib import Path
7
+ from torchvision import transforms
8
+
9
+
10
class ImageNet100Parquet(Dataset):
    """PyTorch ``Dataset`` over HuggingFace-style parquet shards.

    Each row is expected to provide an ``image`` column holding a dict
    with a ``bytes`` entry (the encoded image) and an integer ``label``
    column.  # NOTE(review): layout assumed from __getitem__'s access
    # pattern -- confirm with scripts/debug_structure.py output.
    """

    def __init__(self, data_dir: str, split: str = "train", transform=None):
        """Load every parquet shard for ``split`` from ``data_dir``.

        Args:
            data_dir: Directory containing ``{split}-*.parquet`` shards.
            split: Dataset split prefix, e.g. "train" or "validation".
            transform: Optional callable applied to each decoded PIL image.

        Raises:
            FileNotFoundError: If no shard matches ``{split}-*.parquet``.
        """
        self.data_dir = Path(data_dir)
        self.transform = transform

        # Gather all shards for the split; sorted() keeps row order
        # deterministic across runs.
        parquet_files = sorted(self.data_dir.glob(f"{split}-*.parquet"))
        if not parquet_files:
            # pd.concat([]) would raise a cryptic
            # "No objects to concatenate" ValueError -- fail early with
            # an actionable message instead.
            raise FileNotFoundError(
                f"No parquet files matching '{split}-*.parquet' in {self.data_dir}"
            )
        self.data = pd.concat([pd.read_parquet(f)
                               for f in parquet_files], ignore_index=True)

    def __len__(self):
        """Total number of samples across all loaded shards."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the ``(image, label)`` pair at position ``idx``.

        The image is decoded from the bytes stored in the row's image
        dict, converted to RGB, and passed through ``transform`` if one
        was supplied. The label is returned as a long tensor.
        """
        row = self.data.iloc[idx]

        # Decode image from bytes (stored in a dict under 'bytes').
        image_bytes = row['image']['bytes']
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        label = torch.tensor(row['label'], dtype=torch.long)

        if self.transform:
            image = self.transform(image)

        return image, label
+
36
+
37
+ def main():
38
+ # Define transforms
39
+ transform = transforms.Compose([
40
+ transforms.Resize((224, 224)),
41
+ transforms.ToTensor(),
42
+ ])
43
+
44
+ # Create datasets
45
+ train_dataset = ImageNet100Parquet("data", "train", transform)
46
+ test_dataset = ImageNet100Parquet("data", "validation", transform)
47
+
48
+ # Create dataloaders
49
+ train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
50
+ test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)
51
+
52
+ print(f"Train dataset size: {len(train_dataset)}")
53
+ print(f"Test dataset size: {len(test_dataset)}")
54
+
55
+ # Test iteration
56
+ print("\nTesting train loader iteration...")
57
+ stop_idx = 0
58
+ for x, y_true in train_loader:
59
+ print(x.shape, y_true.shape)
60
+ stop_idx += 1
61
+ if stop_idx > 10:
62
+ break
63
+
64
+ stop_idx = 0
65
+ print("\nTesting test loader iteration...")
66
+ for x, y_true in test_loader:
67
+ print(x.shape, y_true.shape)
68
+ stop_idx += 1
69
+ if stop_idx > 10:
70
+ break
71
+
72
+
73
+ if __name__ == "__main__":
74
+ main()