| | import argparse |
| | from dataclasses import dataclass |
| | from typing import Tuple |
| |
|
| | import numpy as np |
| | import torch |
| | import xarray as xr |
| | from torch.utils.data import DataLoader, Dataset |
| |
|
| |
|
@dataclass(frozen=True)
class Window:
    """Immutable sliding-window configuration; all sizes are in hours.

    lookback_hours -- length of the input history fed to the model.
    horizon_hours  -- length of the forecast target that follows it.
    stride_hours   -- spacing between consecutive window start times.
    """

    # Defaults: one week of history, one day of forecast, daily stride.
    lookback_hours: int = 168
    horizon_hours: int = 24
    stride_hours: int = 24
| |
|
| |
|
class NwmBasinForecastDataset(Dataset):
    """Sliding-window streamflow forecasting dataset over an NWM zarr store.

    Each sample pairs a lookback window of [streamflow, precipitation_rate]
    with the streamflow over the following horizon, for all basins at once.

    Returns:
        x: (basin, lookback_hours, 2) float32 -> [streamflow, precipitation_rate]
        y: (basin, horizon_hours) float32 -> streamflow
    """

    def __init__(
        self,
        zarr_path: str,
        *,
        split: str,
        window: "Window" = None,
        train_end: np.datetime64 = np.datetime64("2018-12-31T23:00:00"),
        val_end: np.datetime64 = np.datetime64("2019-06-30T23:00:00"),
    ) -> None:
        """Open the store and precompute the window start indices for `split`.

        Args:
            zarr_path: Path/URL of a consolidated zarr store containing the
                "streamflow", "precipitation_rate" and "time" variables.
            split: One of "train", "val", "test".
            window: Lookback/horizon/stride configuration. Defaults to
                ``Window()`` when omitted (None sentinel so the default is
                not evaluated at function-definition time).
            train_end: Last timestamp (inclusive) of the train split.
            val_end: Last timestamp (inclusive) of the val split.

        Raises:
            ValueError: If `split` is not one of the known names.
        """
        # Fail fast on a bad split name BEFORE paying for the (potentially
        # remote) zarr open.
        if split not in ("train", "val", "test"):
            raise ValueError("split must be one of: train, val, test")

        window = Window() if window is None else window

        ds = xr.open_zarr(zarr_path, consolidated=True)
        self.ds = ds
        self.window = window

        t0, t1 = self._split_bounds(ds["time"].values, split, train_end, val_end)

        # A sample starting at i needs lookback + horizon consecutive steps,
        # so the last valid start index is t1 - n + 1 (range stop exclusive).
        n = window.lookback_hours + window.horizon_hours
        self.start_indices = list(range(t0, t1 - n + 2, window.stride_hours))

        self.sf = ds["streamflow"]
        self.pr = ds["precipitation_rate"]

    @staticmethod
    def _split_bounds(
        t: np.ndarray,
        split: str,
        train_end: np.datetime64,
        val_end: np.datetime64,
    ) -> Tuple[int, int]:
        """Return (t0, t1), the inclusive index bounds of `split` within `t`.

        `t` is a sorted array of np.datetime64 timestamps; each split ends
        inclusively at its boundary timestamp (side="right" puts a timestamp
        equal to the boundary inside the earlier split).
        """
        if split == "train":
            t0 = 0
            t1 = int(np.searchsorted(t, train_end, side="right")) - 1
        elif split == "val":
            t0 = int(np.searchsorted(t, train_end, side="right"))
            t1 = int(np.searchsorted(t, val_end, side="right")) - 1
        elif split == "test":
            t0 = int(np.searchsorted(t, val_end, side="right"))
            t1 = len(t) - 1
        else:
            raise ValueError("split must be one of: train, val, test")
        return t0, t1

    def __len__(self) -> int:
        """Number of sliding-window samples available in this split."""
        return len(self.start_indices)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the (x, y) tensors for window `idx` (shapes per class doc)."""
        i = self.start_indices[idx]
        lb = self.window.lookback_hours
        hz = self.window.horizon_hours

        sf_x = np.asarray(self.sf.isel(time=slice(i, i + lb)).values, dtype=np.float32)
        pr_x = np.asarray(self.pr.isel(time=slice(i, i + lb)).values, dtype=np.float32)
        y = np.asarray(self.sf.isel(time=slice(i + lb, i + lb + hz)).values, dtype=np.float32)

        # Variables appear to be stored time-first; transpose so the basin
        # axis leads, matching the class docstring. (assumes 2D (time, basin)
        # arrays -- confirm against the store schema)
        x = np.stack([sf_x.T, pr_x.T], axis=-1)
        return torch.from_numpy(x), torch.from_numpy(y.T)
| |
|
| |
|
def main() -> None:
    """Smoke test: load one batch from the requested split and print shapes."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--zarr", default="nwm_hydrology_benchmark.zarr")
    parser.add_argument("--split", default="train", choices=["train", "val", "test"])
    parser.add_argument("--batch-size", type=int, default=4)
    parser.add_argument("--num-workers", type=int, default=0)
    ns = parser.parse_args()

    dataset = NwmBasinForecastDataset(ns.zarr, split=ns.split)
    # Shuffle only while training; evaluation splits stay in time order.
    loader = DataLoader(
        dataset,
        batch_size=ns.batch_size,
        shuffle=ns.split == "train",
        num_workers=ns.num_workers,
    )

    x, y = next(iter(loader))
    print("x:", tuple(x.shape), x.dtype, "y:", tuple(y.shape), y.dtype)
| |
|
| |
|
# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| |
|
| |
|