Datasets:
Fahad Alghanim committed on
Commit ·
6271445
1
Parent(s): 88a0b03
Add S3 streaming benchmark
Browse files

Extend throughput benchmark to measure direct streaming from the public S3 Zarr and record baseline numbers in the dataset card.
- README.md +2 -2
- bench/throughput_benchmark.py +60 -1
README.md
CHANGED
|
@@ -93,6 +93,6 @@ Then fill in the table below (the script prints a Markdown row you can paste her
|
|
| 93 |
|
| 94 |
| mode | samples/sec | MB/sec | first_batch_sec |
|
| 95 |
|---|---:|---:|---:|
|
| 96 |
-
| local | 0.
|
| 97 |
-
|
|
| 98 |
|
|
|
|
| 93 |
|
| 94 |
| mode | samples/sec | MB/sec | first_batch_sec |
|
| 95 |
|---|---:|---:|---:|
|
| 96 |
+ | local | 0.366 | 351.922 | 3.598 |
+ | streaming_s3 | 0.109 | 104.646 | 9.505 |
|
| 98 |
|
bench/throughput_benchmark.py
CHANGED
|
@@ -44,6 +44,62 @@ def benchmark_local(zarr_path: str, *, n_samples: int = 16, in_days: int = 7, ou
|
|
| 44 |
return BenchResult('local', samples_per_sec, mb_per_sec, first)
|
| 45 |
|
| 46 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
def benchmark_hf(repo_id: str, *, n_samples: int = 16, in_days: int = 7, out_days: int = 7, seed: int = 0) -> BenchResult:
|
| 48 |
"""Benchmark streaming from HF using the hf:// fsspec protocol.
|
| 49 |
|
|
@@ -84,6 +140,7 @@ def benchmark_hf(repo_id: str, *, n_samples: int = 16, in_days: int = 7, out_day
|
|
| 84 |
def main() -> None:
|
| 85 |
p = argparse.ArgumentParser()
|
| 86 |
p.add_argument('--local', help='Path to pacific_sst.zarr')
|
|
|
|
| 87 |
p.add_argument('--hf', help='HF dataset repo_id, e.g. KokosDev/mur-sst-ml-benchmark')
|
| 88 |
p.add_argument('--n-samples', type=int, default=16)
|
| 89 |
p.add_argument('--seed', type=int, default=0)
|
|
@@ -92,6 +149,8 @@ def main() -> None:
|
|
| 92 |
results = []
|
| 93 |
if args.local:
|
| 94 |
results.append(benchmark_local(args.local, n_samples=args.n_samples, seed=args.seed))
|
|
|
|
|
|
|
| 95 |
if args.hf:
|
| 96 |
try:
|
| 97 |
results.append(benchmark_hf(args.hf, n_samples=args.n_samples, seed=args.seed))
|
|
@@ -99,7 +158,7 @@ def main() -> None:
|
|
| 99 |
print('HF streaming benchmark failed:', repr(e))
|
| 100 |
print('Tip: install newer huggingface_hub + fsspec with hf:// support')
|
| 101 |
if not results:
|
| 102 |
-
raise SystemExit('Provide --local and/or --hf')
|
| 103 |
|
| 104 |
print('## Throughput benchmark')
|
| 105 |
for r in results:
|
|
|
|
| 44 |
return BenchResult('local', samples_per_sec, mb_per_sec, first)
|
| 45 |
|
| 46 |
|
| 47 |
def benchmark_s3(
    s3_root: str,
    *,
    n_samples: int = 16,
    in_days: int = 7,
    out_days: int = 7,
    seed: int = 0,
    time_start: str = "2018-01-01",
    time_end: str = "2019-12-31",
    lat_min: float = 20.0,
    lat_max: float = 50.0,
    lon_min_360: float = 180.0,
    lon_max_360: float = 240.0,
) -> BenchResult:
    """Benchmark streaming reads directly from the public AWS S3 Zarr.

    Opens the Zarr store anonymously via ``s3fs``, subsets a lat/lon/time
    window, then measures (a) time-to-first-batch for one (input, target)
    sample and (b) sustained samples/sec and MB/sec over ``n_samples``
    randomly chosen time windows.

    Args:
        s3_root: Public S3 Zarr root, e.g. ``mur-sst/zarr-v1``.
        n_samples: Number of random (input, target) windows to read.
        in_days: Length of each input window along the time axis.
        out_days: Length of each target window along the time axis.
        seed: RNG seed so the sampled windows are reproducible.
        time_start: Inclusive start of the time subset (ISO date string).
        time_end: Inclusive end of the time subset (ISO date string).
        lat_min: Southern latitude bound of the spatial subset.
        lat_max: Northern latitude bound of the spatial subset.
        lon_min_360: Western longitude bound, in [0, 360) convention.
        lon_max_360: Eastern longitude bound, in [0, 360) convention.

    Returns:
        A ``BenchResult`` tagged ``"streaming_s3"``.

    Raises:
        ValueError: If the selected time range has fewer than
            ``in_days + out_days`` steps, so no sample window fits.
    """
    # Local import: s3fs is only required for this benchmark mode, so the
    # script stays usable without it when only --local/--hf are requested.
    import s3fs

    fs = s3fs.S3FileSystem(anon=True)
    store = s3fs.S3Map(root=s3_root, s3=fs, check=False)
    # chunks={} opens lazily without dask; each `.values` below triggers the
    # actual network read for just the requested slice.
    ds = xr.open_zarr(store, consolidated=True, decode_times=True, mask_and_scale=True, chunks={})

    # The store's lon coordinate appears to be in [-180, 180]; map it onto
    # [0, 360) and re-sort so the requested window is one contiguous slice.
    ds = ds.assign_coords(lon=((ds["lon"] % 360).astype("float32"))).sortby("lon")
    ds = ds[["analysed_sst"]].sel(
        time=slice(np.datetime64(time_start), np.datetime64(time_end)),
        lat=slice(lat_min, lat_max),
        lon=slice(lon_min_360, lon_max_360),
    )
    var = ds["analysed_sst"]

    max_start = int(ds.sizes["time"]) - (in_days + out_days)
    if max_start < 0:
        # Fail with a clear message instead of randint's cryptic
        # "low >= high" ValueError when the time subset is too short.
        raise ValueError(
            f"time range {time_start}..{time_end} has {int(ds.sizes['time'])} steps; "
            f"need at least {in_days + out_days} for one sample window"
        )
    rng = np.random.RandomState(seed)
    idxs = rng.randint(0, max_start + 1, size=n_samples)

    # Time-to-first-batch: one full (input, target) read; the float() of the
    # means consumes the arrays so nothing is deferred past the timer.
    t0 = time.time()
    i0 = int(idxs[0])
    x0 = np.asarray(var.isel(time=slice(i0, i0 + in_days)).values)
    y0 = np.asarray(var.isel(time=slice(i0 + in_days, i0 + in_days + out_days)).values)
    _ = float(x0.mean() + y0.mean())
    first = time.time() - t0

    # Sustained throughput over all sampled windows (idxs[0] included again,
    # matching how the other benchmark modes report their numbers).
    t1 = time.time()
    bytes_read = 0
    for i in idxs:
        i = int(i)
        x = np.asarray(var.isel(time=slice(i, i + in_days)).values)
        y = np.asarray(var.isel(time=slice(i + in_days, i + in_days + out_days)).values)
        bytes_read += x.nbytes + y.nbytes
        _ = float(x.mean() + y.mean())

    dt = time.time() - t1
    # Guard against a pathological ~0 elapsed time with max(dt, 1e-9).
    samples_per_sec = float(len(idxs) / max(dt, 1e-9))
    mb_per_sec = float((bytes_read / (1024 * 1024)) / max(dt, 1e-9))
    return BenchResult("streaming_s3", samples_per_sec, mb_per_sec, first)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
def benchmark_hf(repo_id: str, *, n_samples: int = 16, in_days: int = 7, out_days: int = 7, seed: int = 0) -> BenchResult:
|
| 104 |
"""Benchmark streaming from HF using the hf:// fsspec protocol.
|
| 105 |
|
|
|
|
| 140 |
def main() -> None:
|
| 141 |
p = argparse.ArgumentParser()
|
| 142 |
p.add_argument('--local', help='Path to pacific_sst.zarr')
|
| 143 |
+
p.add_argument('--s3-root', help='Public S3 Zarr root (e.g. mur-sst/zarr-v1)')
|
| 144 |
p.add_argument('--hf', help='HF dataset repo_id, e.g. KokosDev/mur-sst-ml-benchmark')
|
| 145 |
p.add_argument('--n-samples', type=int, default=16)
|
| 146 |
p.add_argument('--seed', type=int, default=0)
|
|
|
|
| 149 |
results = []
|
| 150 |
if args.local:
|
| 151 |
results.append(benchmark_local(args.local, n_samples=args.n_samples, seed=args.seed))
|
| 152 |
+
if args.s3_root:
|
| 153 |
+
results.append(benchmark_s3(args.s3_root, n_samples=args.n_samples, seed=args.seed))
|
| 154 |
if args.hf:
|
| 155 |
try:
|
| 156 |
results.append(benchmark_hf(args.hf, n_samples=args.n_samples, seed=args.seed))
|
|
|
|
| 158 |
print('HF streaming benchmark failed:', repr(e))
|
| 159 |
print('Tip: install newer huggingface_hub + fsspec with hf:// support')
|
| 160 |
if not results:
|
| 161 |
+
raise SystemExit('Provide --local and/or --s3-root and/or --hf')
|
| 162 |
|
| 163 |
print('## Throughput benchmark')
|
| 164 |
for r in results:
|