File size: 623 Bytes
5fed0fc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 |
import torch
import math
def ragged_attn(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, row_lens: torch.Tensor) -> torch.Tensor:
    """PyTorch baseline for ragged (variable-length) attention.

    Query row i attends only to the first ``row_lens[i]`` keys/values.

    Args:
        Q: [M, D] query matrix.
        K: [N, D] key matrix.
        V: [N, Dv] value matrix.
        row_lens: [M] integer tensor; row i attends to K[:row_lens[i]].

    Returns:
        O: [M, Dv] float16 attention output. Rows with ``row_lens[i] == 0``
        are all zeros (rather than NaN from an all-masked softmax).
    """
    M, D = Q.shape
    N = K.shape[0]
    scale = 1.0 / math.sqrt(D)

    # mask[i, j] is True iff key j is visible to query i.
    idx = torch.arange(N, device=Q.device)
    mask = idx.unsqueeze(0) < row_lens.to(idx.dtype).unsqueeze(1)

    scores = (Q @ K.T) * scale  # [M, N]
    scores = scores.masked_fill(~mask, float("-inf"))
    P = torch.softmax(scores, dim=-1)
    # A fully-masked row (row_lens[i] == 0) softmaxes to all-NaN; replace
    # NaNs with 0 so empty rows yield a zero output instead of propagating
    # NaN through the P @ V matmul.
    P = torch.nan_to_num(P, nan=0.0)
    O = (P @ V).to(torch.float16)
    return O
|