# nwm-hydrology-benchmark/examples/pytorch_dataloader.py
# Author: Fahad Alghanim — "Add NWM hydrology benchmark" (commit a21f73b)
import argparse
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
import xarray as xr
from torch.utils.data import DataLoader, Dataset
@dataclass(frozen=True)
class Window:
    """Sliding-window configuration for forecast sample extraction.

    Attributes:
        lookback_hours: number of hourly input steps fed to the model.
        horizon_hours: number of hourly steps to predict.
        stride_hours: offset between consecutive window start indices.
    """

    lookback_hours: int = 168
    horizon_hours: int = 24
    stride_hours: int = 24

    def __post_init__(self) -> None:
        # Fail fast on nonsensical configurations instead of letting the
        # dataset silently build an empty or bogus start-index range.
        if self.lookback_hours <= 0 or self.horizon_hours <= 0 or self.stride_hours <= 0:
            raise ValueError(
                "lookback_hours, horizon_hours and stride_hours must all be positive"
            )
class NwmBasinForecastDataset(Dataset):
    """
    Sliding-window forecast samples over an NWM hydrology zarr store.

    Returns:
        x: (basin, lookback_hours, 2) float32 -> [streamflow, precipitation_rate]
        y: (basin, horizon_hours) float32 -> streamflow
    """

    def __init__(self, zarr_path: str, *, split: str, window: Window = Window()) -> None:
        dataset = xr.open_zarr(zarr_path, consolidated=True)
        self.ds = dataset
        self.window = window
        # Both variables are stored as (time, basin); per-basin transposes
        # happen in __getitem__.
        times = dataset["time"].values
        first, last = self._split_bounds(times, split)
        total = window.lookback_hours + window.horizon_hours
        # A sample starting at i consumes time indices [i, i + total), so the
        # last admissible start is last - total + 1 (range stop is exclusive).
        self.start_indices = list(range(first, last - total + 2, window.stride_hours))
        self.sf = dataset["streamflow"]
        self.pr = dataset["precipitation_rate"]

    @staticmethod
    def _split_bounds(times: np.ndarray, split: str) -> Tuple[int, int]:
        # Time-based splits for 2018-2019 hourly data:
        #   train: all of 2018
        #   val:   2019-01 .. 2019-06
        #   test:  2019-07 .. 2019-12
        train_end = np.datetime64("2018-12-31T23:00:00")
        val_end = np.datetime64("2019-06-30T23:00:00")
        after_train = int(np.searchsorted(times, train_end, side="right"))
        after_val = int(np.searchsorted(times, val_end, side="right"))
        if split == "train":
            return 0, after_train - 1
        if split == "val":
            return after_train, after_val - 1
        if split == "test":
            return after_val, len(times) - 1
        raise ValueError("split must be one of: train, val, test")

    def __len__(self) -> int:
        return len(self.start_indices)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        start = self.start_indices[idx]
        lookback = self.window.lookback_hours
        horizon = self.window.horizon_hours
        past = slice(start, start + lookback)
        future = slice(start + lookback, start + lookback + horizon)
        # Materialize (time, basin) slices and cast to float32 for torch.
        flow_hist = np.asarray(self.sf.isel(time=past).values, dtype=np.float32)
        precip_hist = np.asarray(self.pr.isel(time=past).values, dtype=np.float32)
        flow_target = np.asarray(self.sf.isel(time=future).values, dtype=np.float32)
        # Transpose so each basin owns its own sequence.
        features = np.stack([flow_hist.T, precip_hist.T], axis=-1)  # (basin, lookback, 2)
        targets = flow_target.T  # (basin, horizon)
        return torch.from_numpy(features), torch.from_numpy(targets)
def main() -> None:
    """Smoke-test the dataset: pull one batch and print its shapes/dtypes."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--zarr", default="nwm_hydrology_benchmark.zarr")
    parser.add_argument("--split", default="train", choices=["train", "val", "test"])
    parser.add_argument("--batch-size", type=int, default=4)
    parser.add_argument("--num-workers", type=int, default=0)
    opts = parser.parse_args()

    dataset = NwmBasinForecastDataset(opts.zarr, split=opts.split)
    # Only shuffle training samples; keep val/test order deterministic.
    loader = DataLoader(
        dataset,
        batch_size=opts.batch_size,
        shuffle=(opts.split == "train"),
        num_workers=opts.num_workers,
    )
    x, y = next(iter(loader))
    print("x:", tuple(x.shape), x.dtype, "y:", tuple(y.shape), y.dtype)


if __name__ == "__main__":
    main()