#!/usr/bin/env python3
"""
Convert MRMS FLASH_CREST_MAXUNITSTREAMFLOW_00.00 GRIB2 tiles to a single Zarr store.

- Reads 10-minute GRIB2 files by day from the input root (e.g.,
  /glade/derecho/scratch/li1995/data/FLASH_CREST_MAXUNITSTREAMFLOW_00.00/YYYYMMDD/...).
- Aligns latitude/longitude to an existing precip Zarr template that shares the grid.
- Writes float32 data with Zarr chunking (time=1, lat=438, lon=875) using parallel I/O per day.
- Appends day by day to keep memory bounded; larger memory (e.g., 128 GB) can be used by
  increasing per-day parallelism.

Testing (login node, small sample):

    ml conda; conda activate credit
    python /glade/work/li1995/FLASH/data/maxunitstreamflow_to_zarr.py \
        --input-root /glade/derecho/scratch/li1995/data/FLASH_CREST_MAXUNITSTREAMFLOW_00.00 \
        --template-zarr /glade/derecho/scratch/li1995/data/precip_2021_2025.zarr \
        --output-zarr /glade/derecho/scratch/li1995/data/maxunitstreamflow_2021_2025_10min.zarr \
        --start 2021-01-01 --end 2021-01-02 \
        --workers 8 --test-limit 36

Full run (batch node with more memory):

    python /glade/work/li1995/FLASH/data/maxunitstreamflow_to_zarr.py \
        --input-root /glade/derecho/scratch/li1995/data/FLASH_CREST_MAXUNITSTREAMFLOW_00.00 \
        --template-zarr /glade/derecho/scratch/li1995/data/precip_2021_2025.zarr \
        --output-zarr /glade/derecho/scratch/li1995/data/maxunitstreamflow_2021_2025_10min.zarr \
        --start 2021-01-01 --end 2025-08-01 \
        --workers 24
"""
from __future__ import annotations
import argparse
import concurrent.futures
import datetime as dt
import os
import re
from pathlib import Path
from typing import Iterable, List, Optional, Sequence, Tuple
import numpy as np
import xarray as xr
from zarr.codecs import ZstdCodec  # zarr-python 3 codec API
# Avoid overly verbose warnings when opening many GRIBs
os.environ.setdefault("CFGRIB_LOGGING_LEVEL", "ERROR")
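# Timestamps are parsed from filenames ending in _YYYYMMDD-HHMMSS.grib2
# (see extract_datetime_from_name below).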
FILENAME_RE = re.compile(r"_([0-9]{8})-([0-9]{6})\.grib2$")
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Build MAXUNITSTREAMFLOW Zarr from daily MRMS GRIB2 files"
    )
    parser.add_argument(
        "--input-root",
        type=str,
        required=True,
        help="Root directory containing YYYYMMDD/ with GRIB2 files",
    )
    parser.add_argument(
        "--template-zarr",
        type=str,
        required=True,
        help="Existing precip Zarr with matching latitude/longitude",
    )
    parser.add_argument(
        "--output-zarr",
        type=str,
        required=True,
        help="Output Zarr store path to create/append",
    )
    parser.add_argument(
        "--start",
        type=str,
        default="2021-01-01",
        help="Start date (YYYY-MM-DD)",
    )
    parser.add_argument(
        "--end",
        type=str,
        default="2025-08-01",
        help="End date inclusive (YYYY-MM-DD)",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=16,
        help="Parallel workers for per-day GRIB reads",
    )
    parser.add_argument(
        "--test-limit",
        type=int,
        default=0,
        help="If >0, limit number of timesteps per day for testing",
    )
    parser.add_argument(
        "--rechunk-lat",
        type=int,
        default=438,
        help="Latitude chunk size",
    )
    parser.add_argument(
        "--rechunk-lon",
        type=int,
        default=875,
        help="Longitude chunk size",
    )
    return parser.parse_args()

def daterange(start: dt.date, end_inclusive: dt.date) -> Iterable[dt.date]:
    day = start
    one = dt.timedelta(days=1)
    while day <= end_inclusive:
        yield day
        day += one

def list_grib2_files(day_dir: Path) -> List[Path]:
    if not day_dir.is_dir():
        return []
    return sorted(p for p in day_dir.iterdir() if p.name.endswith(".grib2"))

def extract_datetime_from_name(path: Path) -> Optional[np.datetime64]:
    m = FILENAME_RE.search(path.name)
    if not m:
        return None
    ymd, hms = m.group(1), m.group(2)
    try:
        dt_obj = dt.datetime.strptime(f"{ymd}{hms}", "%Y%m%d%H%M%S")
        return np.datetime64(dt_obj)
    except Exception:
        return None

def open_grib2_as_array(
    file_path: Path,
    target_shape: Tuple[int, int],
    missing_to_nan: bool = True,
) -> np.ndarray:
    """Open GRIB2 with cfgrib and return data as a float32 array (1, y, x).

    Chooses the first data variable that has both latitude and longitude in dims.
    """
    ds = xr.open_dataset(
        file_path.as_posix(), engine="cfgrib", backend_kwargs={}, chunks={}
    )
    try:
        var_name = None
        for name, da in ds.data_vars.items():
            if "latitude" in da.dims and "longitude" in da.dims:
                var_name = name
                break
        if var_name is None:
            # fall back to first variable
            var_name = next(iter(ds.data_vars.keys()))
        da = ds[var_name]
        data = da.values  # loads into memory for this tile
        if data.ndim == 2:
            data2d = data
        elif data.ndim == 3 and data.shape[0] == 1:
            data2d = data[0]
        else:
            raise ValueError(
                f"Unexpected data shape {data.shape} in {file_path.name} for var {var_name}"
            )
        if data2d.shape != target_shape:
            raise ValueError(
                f"Array shape {data2d.shape} != expected {target_shape} for {file_path.name}"
            )
        arr = data2d.astype(np.float32, copy=False)
        if missing_to_nan:
            # Replace typical GRIB missing sentinel with NaN if present
            # (3.4028235e+38 is the float32 max)
            arr = np.where(arr >= np.finfo(np.float32).max * 0.99, np.nan, arr)
        # add time axis
        return arr[None, ...]
    finally:
        ds.close()

def datetime64_to_epoch_seconds(times: Sequence[np.datetime64]) -> np.ndarray:
    """Convert datetime64 values to float64 seconds since 1970-01-01.

    This avoids cftime issues and matches the precip Zarr convention.
    """
    if len(times) == 0:
        return np.array([], dtype=np.float64)
    t_ns = np.array(times, dtype="datetime64[ns]")
    base = np.datetime64("1970-01-01", "ns")
    delta_ns = (t_ns - base).astype("timedelta64[ns]").astype(np.int64)
    return (delta_ns.astype(np.float64) / 1e9).astype(np.float64)

def build_day_arrays(
    grib_files: Sequence[Path],
    lat_size: int,
    lon_size: int,
    workers: int,
    test_limit: int,
) -> Tuple[np.ndarray, List[np.datetime64]]:
    """Read a day's GRIB tiles in parallel → stacked (T, Y, X) float32 and time list."""
    if test_limit > 0:
        grib_files = grib_files[:test_limit]
    times: List[Tuple[np.datetime64, int]] = []
    for i, f in enumerate(grib_files):
        t = extract_datetime_from_name(f)
        if t is not None:
            times.append((t, i))
    # Keep only files with parseable time
    if not times:
        return np.empty((0, lat_size, lon_size), dtype=np.float32), []
    times_sorted = sorted(times, key=lambda x: x[0])
    sorted_indices = [i for _, i in times_sorted]
    sorted_files = [grib_files[i] for i in sorted_indices]
    sorted_times = [t for t, _ in times_sorted]
    target_shape = (lat_size, lon_size)

    def _load(path: Path) -> np.ndarray:
        return open_grib2_as_array(path, target_shape=target_shape, missing_to_nan=True)

    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as ex:
        arrays = list(ex.map(_load, sorted_files))
    if not arrays:
        return np.empty((0, lat_size, lon_size), dtype=np.float32), []
    stacked = np.concatenate(arrays, axis=0)
    return stacked, sorted_times

def ensure_output_store(
    output_zarr: str,
    var_name: str,
    lat: xr.DataArray,
    lon: xr.DataArray,
    chunks: Tuple[int, int, int],
    compressor: ZstdCodec,
) -> None:
    """Create the Zarr store if missing with metadata and encodings."""
    if Path(output_zarr).exists():
        return
    empty = xr.DataArray(
        data=np.empty((0, lat.size, lon.size), dtype=np.float32),
        dims=("time", "latitude", "longitude"),
        coords={
            # Use float64 seconds since epoch to mirror precip store
            "time": np.array([], dtype=np.float64),
            "latitude": lat,
            "longitude": lon,
        },
        name=var_name,
        attrs={"long_name": "MAXUNITSTREAMFLOW", "units": "unknown"},
    ).to_dataset(name=var_name)
    # Add CF-like attributes for time
    empty["time"].attrs = {
        "standard_name": "time",
        "long_name": "time",
        "units": "seconds since 1970-01-01",
        "calendar": "proleptic_gregorian",
    }
    encoding = {
        var_name: {"dtype": "float32", "chunks": chunks, "compressors": (compressor,)},
        # store as float64 seconds
        "time": {"dtype": "float64"},
    }
    empty.to_zarr(
        output_zarr,
        mode="w",
        consolidated=True,
        encoding=encoding,
        zarr_format=3,
    )

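# Append one day's stacked array to the store along the time dimension. Per-variable
# encodings are cleared below because the target arrays (with their chunking and
# compression) already exist in the store and xarray rejects conflicting encodings
# on append.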
def append_day(
    output_zarr: str,
    var_name: str,
    day_data: np.ndarray,
    day_times: Sequence[np.datetime64],
    lat: xr.DataArray,
    lon: xr.DataArray,
    chunks: Tuple[int, int, int],
    compressor: ZstdCodec,
) -> None:
    if day_data.size == 0:
        return
    # Convert filename datetimes to epoch seconds (float64)
    time_seconds = datetime64_to_epoch_seconds(day_times)
    ds = xr.Dataset(
        {
            var_name: xr.DataArray(
                day_data,
                dims=("time", "latitude", "longitude"),
                coords={"time": time_seconds, "latitude": lat, "longitude": lon},
                attrs={"long_name": "MAXUNITSTREAMFLOW", "units": "unknown"},
            )
        }
    )
    # Ensure time coordinate attributes persist on append
    ds["time"].attrs = {
        "standard_name": "time",
        "long_name": "time",
        "units": "seconds since 1970-01-01",
        "calendar": "proleptic_gregorian",
    }
    # Ensure no per-variable encodings are carried into append (xarray will error if present)
    if var_name in ds:
        ds[var_name].encoding = {}
    if "time" in ds:
        ds["time"].encoding = {}
    if "latitude" in ds:
        ds["latitude"].encoding = {}
    if "longitude" in ds:
        ds["longitude"].encoding = {}
    ds.to_zarr(
        output_zarr,
        mode="a",
        append_dim="time",
        consolidated=False,
        zarr_format=3,
    )

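# Find the latest timestamp already present in the output store so interrupted runs
# can resume; handles datetime64, numeric epoch, and string/cftime-like time values.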
def get_existing_max_time(output_zarr: str) -> Optional[np.datetime64]:
    p = Path(output_zarr)
    if not p.exists():
        return None
    try:
        ds_out = xr.open_zarr(output_zarr, consolidated=True, decode_times=False)
    except Exception:
        ds_out = xr.open_zarr(output_zarr, consolidated=False, decode_times=False)
    try:
        if "time" in ds_out.coords and ds_out.sizes.get("time", 0) > 0:
            tvar = ds_out["time"]
            tvals = tvar.values
            last = tvals[-1]
            # Cases: datetime64, numeric epoch, or cftime/object
            if isinstance(last, np.datetime64):
                return last.astype("datetime64[ns]")
            if np.issubdtype(tvals.dtype, np.datetime64):
                return np.array(last, dtype="datetime64[ns]")
            if np.issubdtype(tvals.dtype, np.number):
                units = (tvar.attrs or {}).get("units", "seconds since 1970-01-01")
                base_match = re.match(r"(seconds|milliseconds|microseconds|nanoseconds|minutes|hours|days) since (\d{4}-\d{2}-\d{2})", units)
                if base_match:
                    unit, base_date = base_match.groups()
                    base = np.datetime64(base_date, "s")
                    # Map unit to numpy timedelta unit
                    unit_map = {
                        "nanoseconds": "ns",
                        "microseconds": "us",
                        "milliseconds": "ms",
                        "seconds": "s",
                        "minutes": "m",
                        "hours": "h",
                        "days": "D",
                    }
                    np_unit = unit_map.get(unit, "s")
                    delta = np.array(last, dtype=f"timedelta64[{np_unit}]")
                    return (base + delta).astype("datetime64[ns]")
                # Fallback: treat as seconds since epoch
                base = np.datetime64("1970-01-01", "s")
                delta = np.array(last, dtype="timedelta64[s]")
                return (base + delta).astype("datetime64[ns]")
            # Attempt string conversion (e.g., cftime)
            try:
                return np.datetime64(str(last)).astype("datetime64[ns]")
            except Exception:
                return None
        return None
    finally:
        ds_out.close()

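# Driver: build or open the output store, then walk the date range one day at a time,
# reading each day's tiles in parallel and appending them to the store.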
def main() -> None:
    args = parse_args()
    input_root = Path(args.input_root)
    template_zarr = args.template_zarr
    output_zarr = args.output_zarr
    start_date = dt.datetime.strptime(args.start, "%Y-%m-%d").date()
    end_date = dt.datetime.strptime(args.end, "%Y-%m-%d").date()

    # Load template lat/lon from precip Zarr
    ds_tpl = xr.open_zarr(template_zarr, consolidated=True)
    try:
        lat = ds_tpl["latitude"].load()
        lon = ds_tpl["longitude"].load()
    finally:
        ds_tpl.close()
    lat_size, lon_size = int(lat.size), int(lon.size)

    var_name = "maxunitstreamflow"
    chunks = (1, int(args.rechunk_lat), int(args.rechunk_lon))
    compressor = ZstdCodec(level=3)

    # Ensure store exists with metadata
    ensure_output_store(output_zarr, var_name, lat, lon, chunks, compressor)
    existing_max_time = get_existing_max_time(output_zarr)
    if existing_max_time is not None:
        print(f"Resuming after {existing_max_time}")

    for day in daterange(start_date, end_date):
        day_dir = input_root / day.strftime("%Y%m%d")
        gribs = list_grib2_files(day_dir)
        if not gribs:
            continue
        # Filter by resume point
        if existing_max_time is not None:
            gribs = [
                f
                for f in gribs
                if (t := extract_datetime_from_name(f)) is not None and t > existing_max_time
            ]
        if not gribs:
            continue
        print(f"Processing {day_dir} with {len(gribs)} files ...")
        day_data, day_times = build_day_arrays(
            gribs,
            lat_size=lat_size,
            lon_size=lon_size,
            workers=int(args.workers),
            test_limit=int(args.test_limit),
        )
        if day_data.size == 0:
            continue
        append_day(
            output_zarr,
            var_name,
            day_data,
            day_times,
            lat,
            lon,
            chunks,
            compressor,
        )

    # Consolidate metadata at the end for faster reads
    try:
        import zarr as zarr_pkg

        zarr_pkg.consolidate_metadata(output_zarr)
    except Exception as e:
        print(f"Consolidate metadata skipped: {e}")

if __name__ == "__main__":
    main()
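
# A minimal sketch for sanity-checking the resulting store (assumes the output path
# from the docstring above; adjust as needed):
#
#   import xarray as xr
#   ds = xr.open_zarr(
#       "/glade/derecho/scratch/li1995/data/maxunitstreamflow_2021_2025_10min.zarr",
#       decode_times=False,
#   )
#   print(ds["maxunitstreamflow"])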