import time

import torch
import triton
import triton.language as tl
@triton.jit
def vortex_10x_receipt_kernel(X, Out, N, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < N
    x = tl.load(X + offsets, mask=mask)
    # Fusion: 100 unrolled multiply-add ops on the loaded values.
    # Intermediates stay in registers, so the kernel touches HBM only
    # once on the load above and once on the store below.
    acc = x
    for _ in range(100):
        acc = acc * 1.0001 + 0.0001
    tl.store(Out + offsets, acc, mask=mask)
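

# Timing sketch: wall-clock timing around torch.cuda.synchronize() (used in
# get_receipt below) works, but CUDA events measure device time directly and
# avoid host-side jitter. A minimal alternative, assuming CUDA is available;
# the helper name `cuda_time_ms` is ours, not a library API.
def cuda_time_ms(fn, iters=1000):
    start_evt = torch.cuda.Event(enable_timing=True)
    end_evt = torch.cuda.Event(enable_timing=True)
    fn()  # warmup: triggers lazy init / JIT compilation
    torch.cuda.synchronize()
    start_evt.record()
    for _ in range(iters):
        fn()
    end_evt.record()
    torch.cuda.synchronize()
    return start_evt.elapsed_time(end_evt) / iters  # elapsed_time() reports ms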


def get_receipt():
    N = 1024 * 64  # Small N to highlight launch overhead + fusion efficiency
    X = torch.randn(N, device="cuda")
    Out = torch.empty_like(X)
    # 1. Eager: every `* 1.0001` and `+ 0.0001` is its own kernel,
    #    so each pass launches 200 separate kernels.
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(1000):
        curr = X
        for _ in range(100):
            curr = curr * 1.0001 + 0.0001
    torch.cuda.synchronize()
    eager_ms = (time.perf_counter() - start) / 1000 * 1000  # avg sec/pass -> ms
    # 2. Blitz: one fused launch per pass.
    grid = (triton.cdiv(N, 1024),)
    vortex_10x_receipt_kernel[grid](X, Out, N, BLOCK_SIZE=1024)  # warmup: exclude JIT compile from timing
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(1000):
        vortex_10x_receipt_kernel[grid](X, Out, N, BLOCK_SIZE=1024)
    torch.cuda.synchronize()
    vortex_ms = (time.perf_counter() - start) / 1000 * 1000  # avg sec/pass -> ms
| print(f"--- BLITZ H200 ARTISAN RECEIPT ---") | |
| print(f"Eager (100 Kernels): {eager_ms:.4f}ms") | |
| print(f"Blitz (1 Monolith): {vortex_ms:.4f}ms") | |
| print(f"FINAL SPEEDUP: {eager_ms/vortex_ms:.2f}x") | |


if __name__ == "__main__":
    get_receipt()