import torch
import time
import triton
import triton.language as tl
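
# A hedged alternative for the host-timer loops below (a sketch, assuming only
# the standard torch.cuda.Event API): CUDA events measure device time directly
# and avoid host-timer noise. The helper name time_gpu_ms is illustrative; it
# is not wired into run_final() and is shown only for reference.
def time_gpu_ms(fn, iters=100):
    start_evt = torch.cuda.Event(enable_timing=True)
    end_evt = torch.cuda.Event(enable_timing=True)
    fn()  # warmup call (also triggers any lazy compilation)
    torch.cuda.synchronize()
    start_evt.record()
    for _ in range(iters):
        fn()
    end_evt.record()
    torch.cuda.synchronize()  # wait until end_evt has actually been recorded
    return start_evt.elapsed_time(end_evt) / iters  # elapsed_time returns ms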

@triton.jit
def blitz_tma_kernel(X, Out, N, BLOCK_SIZE: tl.constexpr):
    # Compute this program's element offsets; the load below is a plain
    # masked global load (no SM90 TMA descriptors are actually issued).
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < N
    # Fused elementwise chain (five ops, not ten); algebraically it
    # collapses to sigmoid(1.2 * x + 1.46).
    x = tl.load(X + offsets, mask=mask)
    y = x * 1.5 + 0.7
    y = y * 0.8 - 0.2
    y = y + 1.1
    y = tl.exp(y)
    res = y / (1.0 + y)
    tl.store(Out + offsets, res, mask=mask)
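
# Optional fused reference (a sketch, assuming PyTorch >= 2.0): torch.compile
# should fuse the same elementwise chain into a single kernel, much like the
# hand-written Triton version above. The name fused_eager is illustrative and
# is not used by the benchmark below.
@torch.compile
def fused_eager(x):
    y = x * 1.5 + 0.7
    y = y * 0.8 - 0.2
    y = y + 1.1
    y = torch.exp(y)
    return y / (1.0 + y)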

def run_final():
    N = 1024 * 1024 * 128  # 128M float32 elements (~512 MB per tensor)
    print("--- Blitz H200 TMA Benchmark: 128M Elements ---")
    X = torch.randn(N, device="cuda")
    Out = torch.empty_like(X)
    
    # One warmup pass so CUDA allocator setup is excluded from the timing.
    _ = torch.exp(X * 1.5 + 0.7)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(100):
        y = X * 1.5 + 0.7
        y = y * 0.8 - 0.2
        y = y + 1.1
        y = torch.exp(y)
        z = y / (1.0 + y)
    torch.cuda.synchronize()
    eager_ms = (time.perf_counter() - start) / 100 * 1000
    
    BLOCK_SIZE = 16384
    grid = (triton.cdiv(N, BLOCK_SIZE),)
    # Warm up once so Triton's JIT compilation is not included in the timing.
    blitz_tma_kernel[grid](X, Out, N, BLOCK_SIZE=BLOCK_SIZE)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(100):
        blitz_tma_kernel[grid](X, Out, N, BLOCK_SIZE=BLOCK_SIZE)
    torch.cuda.synchronize()
    vortex_ms = (time.perf_counter() - start) / 100 * 1000
    
    print(f"Eager Latency: {eager_ms:.4f}ms")
    print(f"Blitz TMA Latency: {vortex_ms:.4f}ms")
    print(f"SILICON ART SPEEDUP: {eager_ms/vortex_ms:.2f}x")

if __name__ == "__main__":
    run_final()