import torch

import triton
import triton.language as tl

# ********************************************************
# --------------------------------------------------------
# Sparse = Dense x Dense (SDD)
# One kernel instance per non-zero output block; the block
# coordinates are read from a look-up table (LUT).
# --------------------------------------------------------
# ********************************************************


@triton.heuristics({
    'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0,
})
@triton.jit
def _sdd_kernel(
    A, B, C,
    stride_za, stride_ha, stride_ma, stride_ak,
    stride_zb, stride_hb, stride_bk, stride_nb,
    stride_zc, stride_hc, stride_mc, stride_nc,
    K, grid_offset, lut,
    TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
    BLOCK: tl.constexpr, EVEN_K: tl.constexpr
):
    # ------------ #
    # - Prologue - #
    # ------------ #
    block_id = tl.program_id(1) + grid_offset
    lut += block_id * 3
    # offsets
    off_z = tl.program_id(2)  # batch
    off_h = tl.load(lut + 0)  # head

    # initialize pointers to A
    start_am = tl.load(lut + 1)
    offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK)
    offs_ak = tl.arange(0, TILE_K)
    a_ptrs = A \
        + off_z * stride_za \
        + off_h * stride_ha \
        + offs_am[:, None] * stride_ma \
        + offs_ak[None, :] * stride_ak
    # initialize pointers to B
    start_bn = tl.load(lut + 2)
    offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK)
    offs_bk = tl.arange(0, TILE_K)
    b_ptrs = B \
        + off_z * stride_zb \
        + off_h * stride_hb \
        + offs_bn[None, :] * stride_nb \
        + offs_bk[:, None] * stride_bk
    # ---------------- #
    #    Inner Loop    #
    # ---------------- #
    acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
    for k in range(K, 0, -TILE_K):
        if EVEN_K:
            a = tl.load(a_ptrs)
            b = tl.load(b_ptrs)
        else:
            a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.)
            b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.)
        acc += tl.dot(a, b)
        a_ptrs += TILE_K * stride_ak
        b_ptrs += TILE_K * stride_bk
    c = acc.to(C.dtype.element_ty)
    # ---------------- #
    #     Epilogue     #
    # ---------------- #
    offs_cm = tl.arange(0, TILE_M) % BLOCK
    offs_cn = tl.arange(0, TILE_N) % BLOCK
    pc = C \
        + off_z * stride_zc \
        + block_id * stride_hc \
        + offs_cm[:, None] * stride_mc \
        + offs_cn[None, :] * stride_nc
    tl.store(pc, c, mask=True)
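
# The SDD look-up table is a flat int32 tensor holding one
# (head, block-row, block-col) triplet per non-zero block, which is why the
# kernel above advances `lut` by block_id * 3 and loads offsets 0, 1 and 2.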


def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None):
    if a.stride(2) != 1 and a.stride(3) != 1:
        a = a.contiguous()
    if b.stride(2) != 1 and b.stride(3) != 1:
        b = b.contiguous()
    # (A * B)^T = B^T * A^T
    if trans_c:
        a, b = b, a
        trans_a, trans_b = not trans_b, not trans_a
    # shape constraints
    a_dim = -2 if trans_a else -1
    b_dim = -1 if trans_b else -2
    Ka, Kb = a.shape[a_dim], b.shape[b_dim]
    if Ka != Kb:
        raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})")
    # allocate output: one (block x block) tile per non-zero in the layout
    if out is None:
        c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device)
    else:
        assert out.shape == (a.shape[0], lut.shape[0], block, block)
        c = out
    # launch one program per non-zero block and per batch entry
    grid = [1, c.shape[1], c.shape[0]]
    _sdd_kernel[grid](
        a, b, c,
        a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
        b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
        c.stride(0), c.stride(1), c.stride(2), c.stride(3),
        Ka, 0, lut,
        TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4,
        num_warps=4,
    )
    return c


def sdd_lut(layout, block, device):
    # one (head, block-row, block-col) triplet per non-zero block
    lut = layout.nonzero(as_tuple=False).to(device).int()
    lut = lut.contiguous()
    return lut, None
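
# For example (a hand-checked illustration), a single-head layout
#     [[[1, 0],
#       [0, 1]]]
# gives lut = [[0, 0, 0], [0, 1, 1]]: two non-zero blocks, both in head 0,
# at block coordinates (0, 0) and (1, 1); the width slot is None (unused
# for SDD).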


# ********************************************************
# --------------------------------------------------------
# Dense = Sparse x Dense (DSD)
# The LHS operand is stored in blocked-sparse format; a LUT
# of pointer increments drives the reduction loop.
# --------------------------------------------------------
# ********************************************************


@triton.jit
def _dsd_kernel(
    A, B, C,
    stride_az, stride_ha, stride_am, stride_ak,
    stride_zb, stride_hb, stride_bk, stride_bn,
    stride_zc, stride_hc, stride_cm, stride_cn,
    DS0, DS1, lut,
    TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr
):
    # ------------ #
    # - Prologue - #
    # ------------ #
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    num_pid_m = tl.num_programs(0)
    num_pid_n = tl.num_programs(1)
    pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M)
    pidz = tl.program_id(2)
    header = lut + pid_n * 4
    offset = tl.load(header + 0)
    K = tl.load(header + 1)
    column = tl.load(header + 2)
    off_h = tl.load(header + 3)
    pinc = lut + offset
    # initialize pointers to A (sparse)
    block_id = tl.load(pinc + 1)
    block_id = tl.multiple_of(block_id, 8)  # compiler hint
    offs_am = tl.arange(0, TILE_M)
    offs_ak = tl.arange(0, TILE_K)
    pa = A + pidz * stride_az \
        + block_id * stride_ha \
        + offs_am[:, None] * stride_am \
        + offs_ak[None, :] * stride_ak
    # initialize pointers to B (dense)
    offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N)
    offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N)
    start_bk = tl.load(pinc)
    start_bk = tl.multiple_of(start_bk, 8)  # compiler hint
    offs_bk = start_bk + tl.arange(0, TILE_K)
    pb = B + pidz * stride_zb \
        + off_h * stride_hb \
        + offs_bn[None, :] * stride_bn \
        + offs_bk[:, None] * stride_bk
    # ---------------- #
    #    Inner Loop    #
    # ---------------- #
    acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
    pinc += 2
    inc_a = tl.load(pinc + 1)
    inc_a = tl.multiple_of(inc_a, 8)
    inc_b = tl.load(pinc)
    inc_b = tl.multiple_of(inc_b, 8)
    for k in range(K, 0, -TILE_K):
        a = tl.load(pa, mask=True)
        b = tl.load(pb, mask=offs_bn[None, :] < DS0)
        acc += tl.dot(a, b)
        pa += inc_a
        pb += inc_b * stride_bk
        pinc += 2
        inc_a = tl.load(pinc + 1)
        inc_a = tl.multiple_of(inc_a, 8)
        inc_b = tl.load(pinc)
        inc_b = tl.multiple_of(inc_b, 8)
    c = acc.to(C.dtype.element_ty)
    # initialize pointers to C
    offs_cm = column * TILE_M + tl.arange(0, TILE_M)
    offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N)
    pc = C \
        + off_h * stride_hc \
        + pidz * stride_zc \
        + offs_cm[:, None] * stride_cm \
        + offs_cn[None, :] * stride_cn
    tl.store(pc, c, mask=offs_cn[None, :] < DS0)


def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
    if a.stride(2) != 1 and a.stride(3) != 1:
        a = a.contiguous()
    if b.stride(2) != 1 and b.stride(3) != 1:
        b = b.contiguous()
    # shapes / dtypes
    AS1 = block * spdims[2 if trans_a else 1]
    BS0 = b.size(0)
    BS1 = b.size(1)
    BS3 = b.size(2 if trans_b else 3)
    dtype = a.dtype
    # allocate output
    CS0 = BS0
    CS1 = BS1
    CS2 = BS3 if trans_c else AS1
    CS3 = AS1 if trans_c else BS3
    if out is None:
        c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
    else:
        assert out.shape == (CS0, CS1, CS2, CS3)
        c = out
    # meta-parameter heuristics
    TILE_N = 128
    # compute output
    grid = lambda meta: [triton.cdiv(BS3, meta['TILE_N']), width, BS0]
    _dsd_kernel[grid](
        a, b, c,
        a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
        b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
        c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3),
        BS3, AS1, lut,
        TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 32), BLOCK=block, num_stages=4,
        num_warps=4, GROUP_SIZE_M=4,
    )
    return c


def dsd_lut(layout, block, step, trans, device):
    """
    Generates the look-up table for incrementing pointers in the DSD/DDS matmul.
    Example (BLOCK=32, STEP=16)
    [[1, 0, 0, 1, 0],
     [0, 1, 1, 0, 1],
     [1, 0, 1, 0, 0]]

    Then the offsets for A are
     [0 , 16, 32, 48] <- row 0
      \\----/  \\----/
      col=0    col=3
     [64, 80, 96, 112, 128, 144] <- row 1
      \\----/  \\----/  \\------/
      col=1    col=2    col=3
     [160, 176, 192, 208] <- row 2
    which leads to the increments table ("||" marks segment boundaries)
    [0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16]

    Because B is dense, the offsets are
     [0, 16, 96, 112] <- row 0
     [32, 48, 64, 80] <- row 1
     [0, 16, 64, 80]  <- row 2
    """
    sizes = torch.sum(layout, 2 if trans else 1)
    head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True)
    sizes = sizes.flatten()
    segments = sizes * step
    # pointer increments
    if trans:
        nnz = layout.nonzero(as_tuple=False)
    else:
        nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
    num_blocks = nnz.size(0)
    offsets = torch.zeros_like(sizes)
    offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
    offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
    # ------------------------------
    # dense input pointer increments
    # ------------------------------
    # The inner-loop matmul kernel may have a fixed step size ("step") that
    # is smaller than the block size, so a block contributes `div` entries
    B_idx = nnz[:, 2] * block
    B_incs = B_idx.clone()
    B_incs[1:] -= B_idx[:-1]
    div = block // step
    B_incs = B_incs.view(-1, 1).repeat(1, div)
    B_incs[:, 1:] = step
    B_incs[:, 0] -= (div - 1) * step
    # the first entry of each segment is the absolute starting column
    # (consumed as `start_bk` by the kernel), not a delta
    B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]]
    B_incs = B_incs.view(-1)
    # -------------------------------
    # sparse input pointer increments
    # -------------------------------
    # same as above, except the increments are in the sparse memory layout
    if trans:
        A_idx = torch.arange(num_blocks, device=layout.device)
    else:
        A_idx = torch.tensor([], dtype=torch.int64, device=layout.device)
        current_offset = 0
        for z in range(layout.size(0)):
            layoutw = layout[z, :, :].clone().long()
            msum = layoutw.sum()
            layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device)
            A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1))
            current_offset += msum
    A_incs = A_idx * block * block
    A_incs[1:] -= A_idx[:-1] * block * block
    A_incs = A_incs.view(-1, 1).repeat(1, div)
    if trans:
        A_incs[:, 1:] = step
        A_incs[:, 0] -= (div - 1) * step
    else:
        A_incs[:, 1:] = step * block
        A_incs[:, 0] -= (div - 1) * step * block
    # the first entry of each segment is the absolute block index
    # (consumed as `block_id` by the kernel), not a delta
    A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]]
    A_incs = A_incs.view(-1)
    # create header
    width = col_id.size(0)
    offsets = offsets * 2 * div + 4 * width
    segments = segments * div
    header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous()
    # create increments
    incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
    # pad to accommodate pre-fetching inside the kernel
    pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
    incs = torch.cat((incs, pad))
    # create lut
    lut = torch.cat((header, incs))
    lut = lut.type(torch.int32).to(device)
    return lut, width


# ********************************************************
# --------------------------------------------------------
# Dense = Dense x Sparse (DDS)
# Implemented by reducing to DSD on swapped operands.
# --------------------------------------------------------
# ********************************************************


def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
    # A @ B = (B^T @ A^T)^T, so DDS is DSD with swapped, transposed operands
    # and a transposed output
    return dsd_matmul(b, a, not trans_b, not trans_a, not trans_c, spdims, block, lut, width, out=out)


# ********************************************************
# Autograd function wrapping the three kernels
# ********************************************************


class _matmul(torch.autograd.Function):

    fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul}

    @staticmethod
    def forward(
        ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block,
        c_lut, c_width, da_lut, da_width, db_lut, db_width, out
    ):
        c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out)
        ctx.save_for_backward(a, b)
        ctx.da_lut = da_lut
        ctx.da_width = da_width
        ctx.db_lut = db_lut
        ctx.db_width = db_width
        ctx.mode = mode
        ctx.spdims = spdims
        ctx.block = block
        ctx.trans_a = trans_a
        ctx.trans_b = trans_b
        ctx.trans_c = trans_c
        ctx.has_out = out is not None
        return c

    @staticmethod
    def backward(ctx, dc):
        # saved tensors
        a, b = ctx.saved_tensors
        da, db = None, None
        mode = ctx.mode
        # gradient w.r.t. a
        if ctx.needs_input_grad[0]:
            mode_da = mode[1] + mode[0] + mode[2]
            da = _matmul.fn[mode_da](
                dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_width,
            )
        # gradient w.r.t. b
        if ctx.needs_input_grad[1]:
            mode_db = mode[2] + mode[1] + mode[0]
            db = _matmul.fn[mode_db](
                a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_width,
            )
        dout = dc if ctx.has_out else None
        return da, db, None, None, None, \
            None, None, None, None, \
            None, None, None, None, None, dout
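
# Note on the backward modes: permuting the mode string tracks the operand
# swap. For example, with mode='dsd', mode_da = mode[1]+mode[0]+mode[2] =
# 'sdd' (the gradient of the sparse operand is sparse) and
# mode_db = mode[2]+mode[1]+mode[0] = 'dsd'.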


class matmul:

    def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False):
        if mode not in ['sdd', 'dsd', 'dds']:
            raise NotImplementedError('Supported modes are: sdd, dsd, dds')
        self.block = block
        self.mode = mode
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.trans_c = trans_c
        self.layout = layout
        self.spdims = layout.shape
        step = min(block, 32)
        if self.mode == 'sdd':
            self.c_lut, self.c_width = sdd_lut(layout, block, device)
            self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device)
            self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device)
        if self.mode == 'dsd':
            self.c_lut, self.c_width = dsd_lut(layout, block, step, not self.trans_a, device)
            self.da_lut, self.da_width = sdd_lut(layout, block, device)
            self.db_lut, self.db_width = dsd_lut(layout, block, step, self.trans_a, device)
        if self.mode == 'dds':
            self.c_lut, self.c_width = dsd_lut(layout, block, step, self.trans_b, device)
            self.da_lut, self.da_width = dsd_lut(layout, block, step, not self.trans_b, device)
            self.db_lut, self.db_width = sdd_lut(layout, block, device)

    def __call__(self, a, b, out=None):
        c = _matmul.apply(
            a, b, self.trans_a, self.trans_b, self.trans_c, self.mode, self.spdims, self.block,
            self.c_lut, self.c_width,
            self.da_lut, self.da_width,
            self.db_lut, self.db_width,
            out
        )
        return c
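

# A minimal usage sketch (an illustration, not part of the module's API):
# computes an SDD product over a diagonal block layout and checks one
# non-zero block against a dense reference. Assumes a CUDA device and the
# (batch, head, row, col) tensor convention used above.
if __name__ == "__main__":
    if torch.cuda.is_available():
        block = 16
        # one head, a 2x2 grid of blocks, non-zeros on the diagonal
        layout = torch.tensor([[[1, 0], [0, 1]]], dtype=torch.int64)
        dot = matmul(layout, block, 'sdd', device='cuda')
        a = torch.randn(2, 1, 2 * block, 64, dtype=torch.float16, device='cuda')
        b = torch.randn(2, 1, 64, 2 * block, dtype=torch.float16, device='cuda')
        c = dot(a, b)  # shape (2, nnz=2, block, block)
        ref = torch.matmul(a, b)
        # the first non-zero block sits at block coordinates (0, 0)
        assert torch.allclose(c[:, 0].float(), ref[:, 0, :block, :block].float(), atol=1e-1, rtol=0)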