import torch

import triton
import triton.language as tl

if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. This benchmark requires a CUDA-enabled GPU.")

DEVICE = torch.device("cuda:0")
torch.cuda.set_device(DEVICE)


@triton.jit
def gelu(x):
    # Exact GELU: x * Phi(x) = x * 0.5 * (1 + erf(x / sqrt(2))); the constant is 1/sqrt(2).
    return x * 0.5 * (1.0 + tl.extra.cuda.libdevice.erf(x * 0.7071067811865476))


@triton.autotune(
    configs=[
        triton.Config(
            {
                "BLOCK_SIZE_M": 128,
                "BLOCK_SIZE_N": 128,
                "BLOCK_SIZE_K": 64,
                "GROUP_SIZE_M": 8,
                "NUM_STAGES": 4,
            },
            num_stages=4,
            num_warps=8,
        ),
    ],
    key=["M", "N", "K"],
    use_cuda_graph=True,
)
@triton.jit
def matmul_kernel(
    a_ptr,
    b_ptr,
    c_ptr,
    M,
    N,
    K,
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    stride_cm,
    stride_cn,
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
    NUM_STAGES: tl.constexpr,
):
    """Kernel for computing the fused matmul C = gelu(A x B).

    A has shape (M, K), B has shape (K, N), and C has shape (M, N).
    """
    # Map the flat program id to a (pid_m, pid_n) tile of C using a grouped
    # ordering: GROUP_SIZE_M rows of tiles are swept together, so consecutive
    # programs reuse the same blocks of A and B and get better L2 hit rates.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
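
    # A small worked example of the grouped ordering above (illustrative only):
    # with num_pid_m = 4, num_pid_n = 4, and GROUP_SIZE_M = 2, programs 0..7
    # form the first group and traverse it column-major:
    #   pid 0 -> (0, 0), pid 1 -> (1, 0), pid 2 -> (0, 1), pid 3 -> (1, 1),
    #   pid 4 -> (0, 2), pid 5 -> (1, 2), pid 6 -> (0, 3), pid 7 -> (1, 3)
    # so two rows of C tiles are swept together before moving on to rows 2..3.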

    # Create pointers to the first blocks of A and B. Row/column offsets are
    # taken modulo M and N so these loads stay in bounds; the ragged edge along
    # K is handled by the masks in the loop below.
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)

    # Iterate along K, accumulating BLOCK_SIZE_K-wide slabs into an fp32 tile.
    # tl.range with num_stages=NUM_STAGES pipelines the loop, which is what the
    # NUM_STAGES constexpr from the autotune config is for.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K), num_stages=NUM_STAGES):
        # Mask out-of-bounds K elements of the last block with zeros.
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
        accumulator = tl.dot(a, b, accumulator)
        # Advance the pointers to the next K block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # Fuse the activation while the tile is still in registers, then downcast.
    accumulator = gelu(accumulator)
    c = accumulator.to(tl.float16)

    # Write the output tile, masking off the ragged edges of C.
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)


def matmul(a, b):
    assert a.shape[1] == b.shape[0], "Incompatible dimensions of input operands"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    assert b.is_contiguous(), "Matrix B must be contiguous"

    M, N, K = a.shape[0], b.shape[1], a.shape[1]
    # Every element of C is written by the kernel, so the buffer need not be zeroed.
    c = torch.empty((M, N), dtype=torch.float16, device=DEVICE)

    # 1D launch grid: one program per output tile of C.
    grid = lambda META: (
        triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
    )
    matmul_kernel[grid](
        a,
        b,
        c,
        M,
        N,
        K,
        a.stride(0),
        a.stride(1),
        b.stride(0),
        b.stride(1),
        c.stride(0),
        c.stride(1),
    )
    return c
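

# A minimal sanity check and timing sketch (illustrative usage, not part of the
# kernel itself; the shapes and tolerances below are assumptions, with loose
# tolerances to absorb fp16 rounding differences between the two paths).
if __name__ == "__main__":
    torch.manual_seed(0)
    a = torch.randn((512, 512), dtype=torch.float16, device=DEVICE)
    b = torch.randn((512, 512), dtype=torch.float16, device=DEVICE)

    # Unfused reference: cuBLAS matmul followed by exact (erf-based) GELU.
    c_ref = torch.nn.functional.gelu(a @ b, approximate="none")
    c_triton = matmul(a, b)
    torch.testing.assert_close(c_triton, c_ref, atol=1e-2, rtol=1e-2)

    # do_bench reports the average runtime in milliseconds.
    ms = triton.testing.do_bench(lambda: matmul(a, b))
    print(f"fused matmul+gelu: {ms:.3f} ms")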