import time

import torch
import triton
import triton.language as tl


@triton.jit
def copy_kernel(A, B, N, BLOCK_SIZE: tl.constexpr):
    # STREAM COPY: A[i] = B[i]
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < N
    b = tl.load(B + offsets, mask=mask)
    tl.store(A + offsets, b, mask=mask)


@triton.jit
def triad_kernel(A, B, C, scalar, N, BLOCK_SIZE: tl.constexpr):
    # STREAM TRIAD: A[i] = B[i] + scalar * C[i]
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < N
    b = tl.load(B + offsets, mask=mask)
    c = tl.load(C + offsets, mask=mask)
    a = b + scalar * c
    tl.store(A + offsets, a, mask=mask)


def run_stream():
    print("--- Blitz Artisan STREAM Benchmark (H200 HBM3e) ---")
    N = 1024 * 1024 * 128  # 128M float32 elements (512 MB per array)
    A = torch.empty(N, device="cuda", dtype=torch.float32)
    B = torch.randn(N, device="cuda", dtype=torch.float32)
    C = torch.randn(N, device="cuda", dtype=torch.float32)
    scalar = 3.14
    grid = (triton.cdiv(N, 1024),)

    # Benchmark COPY: 2 * N * 4 bytes moved per iteration (read B, write A).
    copy_kernel[grid](A, B, N, BLOCK_SIZE=1024)  # warm-up: first call triggers JIT compilation
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(100):
        copy_kernel[grid](A, B, N, BLOCK_SIZE=1024)
    torch.cuda.synchronize()
    copy_bw = (2 * N * 4) / ((time.time() - start) / 100) / 1e12
    print(f"COPY Bandwidth: {copy_bw:.2f} TB/s")

    # Benchmark TRIAD: 3 * N * 4 bytes moved per iteration (read B and C, write A).
    triad_kernel[grid](A, B, C, scalar, N, BLOCK_SIZE=1024)  # warm-up
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(100):
        triad_kernel[grid](A, B, C, scalar, N, BLOCK_SIZE=1024)
    torch.cuda.synchronize()
    triad_bw = (3 * N * 4) / ((time.time() - start) / 100) / 1e12
    print(f"TRIAD Bandwidth: {triad_bw:.2f} TB/s")


if __name__ == "__main__":
    run_stream()
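

# ---------------------------------------------------------------------------
# Optional sketch: event-based timing. This is not part of the benchmark
# above; `bench_kernel` is a hypothetical helper shown for illustration only.
# torch.cuda.Event records timestamps on the GPU stream itself, so it avoids
# folding host-side launch latency into a wall-clock (time.time()) measurement.
# ---------------------------------------------------------------------------
def bench_kernel(launch, iters=100):
    """Return mean seconds per call of `launch`, timed with CUDA events."""
    start_evt = torch.cuda.Event(enable_timing=True)
    end_evt = torch.cuda.Event(enable_timing=True)
    launch()  # warm-up; also absorbs Triton JIT compilation on the first call
    torch.cuda.synchronize()
    start_evt.record()
    for _ in range(iters):
        launch()
    end_evt.record()
    torch.cuda.synchronize()
    return start_evt.elapsed_time(end_evt) / 1e3 / iters  # elapsed_time() is in ms

# Usage (equivalent COPY measurement, assuming A, B, N, grid as in run_stream):
#   sec = bench_kernel(lambda: copy_kernel[grid](A, B, N, BLOCK_SIZE=1024))
#   print(f"COPY Bandwidth: {(2 * N * 4) / sec / 1e12:.2f} TB/s")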