File size: 1,762 Bytes
df0e176
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
from datasets import load_dataset
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from scripts.forward_model import LidarForwardImagingModel

# Module-level forward model instance, shared by all split checks below.
# NOTE(review): constructed at import time — presumably cheap/CPU-only; confirm.
forward_model = LidarForwardImagingModel()

# Batch size used purely for iterating the dataset; shapes are checked per batch.
BATCH_SIZE = 64

def make_loader(split: str):
    """Load the HHDC *split* and wrap it in a non-shuffling DataLoader.

    Returns a (dataset, loader) pair; the dataset is formatted so that
    its "cube" column comes back as torch tensors.
    """
    dataset = load_dataset("anfera236/HHDC", split=split)
    # Expose the "cube" column as torch tensors rather than python lists.
    dataset.set_format(type="torch", columns=["cube"])
    # Deterministic order is fine here — we only inspect shapes, we don't train.
    batched = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False)
    return dataset, batched


def check_split(split_name: str):
    """Stream every batch of *split_name* through the forward model and
    assert that both input and output tensors have the expected shapes.

    Raises AssertionError on the first shape mismatch encountered.
    """
    print(f"Checking {split_name} dataset batches (batch_size={BATCH_SIZE})...")
    _, loader = make_loader(split_name)

    for batch in tqdm(loader):
        # Each batch is expected to stack to (B, 128, 48, 48).
        cubes = batch["cube"]
        assert cubes.ndim == 4, f"Expected 4D input (B, 128, 48, 48), got {cubes.shape}"
        assert cubes.shape[1:] == (128, 48, 48), f"Bad input sample shape: {cubes.shape}"

        # The model is assumed to accept batched input — TODO confirm.
        produced = forward_model(cubes)

        # Output must keep the batch dimension and be (128, 32, 16) per sample.
        assert produced.ndim == 4, f"Expected 4D output (B, 128, 32, 16), got {produced.shape}"
        assert produced.shape[0] == cubes.shape[0], f"Batch size mismatch: input B={cubes.shape[0]}, output B={produced.shape[0]}"
        assert produced.shape[1:] == (128, 32, 16), f"Bad output sample shape: {produced.shape}"


if __name__ == "__main__":
    # Validate every split in the same order as before: train, validation, test.
    for split in ("train", "validation", "test"):
        check_split(split)
    print("All splits passed shape checks ✅")