| |
|
|
| from typing import Optional, Tuple |
| from dataclasses import dataclass, fields |
|
|
| import cutlass |
| import cutlass.cute as cute |
| from cutlass import Int32 |
|
|
| import flash_attn.cute.utils as utils |
| from flash_attn.cute.fast_math import FastDivmod, clz |
|
|
|
|
@dataclass
class ParamsBase:
    """Base for scheduler parameter dataclasses that cross the host/MLIR boundary.

    Implements the cutlass value-extraction protocol: dynamic (non-Constexpr)
    field values are flattened into one list of MLIR values on extraction, and
    a new instance is rebuilt from such a list. The per-field value counts are
    remembered in ``self._values_pos`` so the flat list can be re-partitioned
    field by field on reconstruction.
    """

    def __extract_mlir_values__(self):
        # Gather all dataclass field values, keeping only the dynamic ones:
        # Constexpr fields are compile-time constants and carry no MLIR values.
        all_fields = [getattr(self, field.name) for field in fields(self)]
        non_constexpr_fields = [f for f in all_fields if not isinstance(f, cutlass.Constexpr)]
        values, self._values_pos = [], []
        for obj in non_constexpr_fields:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            # Record how many flat values this field contributed so that
            # __new_from_mlir_values__ can split the list back up in order.
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        """Rebuild an instance of the same class from a flat list of MLIR values.

        Dynamic fields consume ``self._values_pos[i]`` values each, in field
        declaration order; Constexpr fields are copied over unchanged.
        """
        all_fields = {field.name: getattr(self, field.name) for field in fields(self)}
        constexpr_fields = {n: f for n, f in all_fields.items() if isinstance(f, cutlass.Constexpr)}
        non_constexpr_fields = {
            n: f for n, f in all_fields.items() if not isinstance(f, cutlass.Constexpr)
        }
        for (name, field), n_items in zip(non_constexpr_fields.items(), self._values_pos):
            # Rebuild this field from its slice of the flat value list, then
            # advance past the consumed values. (Reassigning an existing key
            # while iterating .items() is safe: the dict size never changes.)
            non_constexpr_fields[name] = cutlass.new_from_mlir_values(field, values[:n_items])
            values = values[n_items:]
        return self.__class__(**non_constexpr_fields, **constexpr_fields)
|
|
|
|
@dataclass
class TileSchedulerArguments(ParamsBase):
    """Host-side inputs from which every tile scheduler derives its Params.

    Dynamic (Int32 / tensor) fields describe the runtime problem size;
    Constexpr fields are compile-time configuration baked into the kernel.
    """

    num_block: Int32  # number of m-blocks (query tiles) per (head, batch)
    num_head: Int32
    num_batch: Int32
    seqlen_k: Int32
    headdim: Int32
    headdim_v: Int32
    total_q: Int32  # total query tokens across the whole batch (varlen case)
    tile_shape_mn: cutlass.Constexpr[Tuple[int, int]]  # (tile_m, tile_n)
    mCuSeqlensQ: Optional[cute.Tensor] = None  # cumulative query seqlens (varlen)
    mSeqUsedQ: Optional[cute.Tensor] = None  # per-batch used query lengths (varlen)
    qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1  # PackGQA folding factor
    element_size: cutlass.Constexpr[int] = 2  # bytes per element (2 for fp16/bf16)
    is_persistent: cutlass.Constexpr[bool] = False  # use a persistent-block schedule
    lpt: cutlass.Constexpr[bool] = False  # L2-aware "longest processing time" ordering
|
|
|
|
class SingleTileScheduler:
    """Non-persistent scheduler: one CUDA block processes exactly one tile.

    The grid is launched as (num_block, num_head, num_batch), so a block's
    grid coordinate directly names its work tile and no index arithmetic is
    required.
    """

    @dataclass
    class Params(ParamsBase):
        # Grid extents; one tile per (block, head, batch) coordinate.
        num_block: Int32
        num_head: Int32
        num_batch: Int32

        @staticmethod
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileScheduler.Params":
            """Derive Params from the generic scheduler arguments."""
            return SingleTileScheduler.Params(args.num_block, args.num_head, args.num_batch)

    def __init__(self, blk_coord: cute.Coord, *, loc=None, ip=None):
        self._blk_coord = blk_coord
        # Each block owns exactly one tile; flips to False after it is consumed.
        self._is_first_block = True
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return SingleTileScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileScheduler":
        """Device-side factory: bind the scheduler to this block's grid coordinate."""
        blk_coord = cute.arch.block_idx()
        return SingleTileScheduler(blk_coord, loc=loc, ip=ip)

    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # One block per tile: the grid is exactly the tile space.
        return params.num_block, params.num_head, params.num_batch

    def get_current_work(self, *, loc=None, ip=None) -> cutlass.utils.WorkTileInfo:
        # Valid only the first time around; invalid once the tile is done.
        return cutlass.utils.WorkTileInfo(self._blk_coord, self._is_first_block)

    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        # Nothing to prefetch: there is no next tile for this block.
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        self._is_first_block = False

    def __extract_mlir_values__(self):
        values, self._values_pos = [], []
        for obj in [self._blk_coord]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        obj_list = []
        for obj, n_items in zip([self._blk_coord], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        # Fix: forward ip alongside loc — previously ip was silently dropped on
        # reconstruction even though __init__ accepts and stores it.
        # NOTE(review): _is_first_block intentionally resets to True via
        # __init__ here — confirm the tracer never rebuilds past that state.
        return SingleTileScheduler(*(tuple(obj_list)), loc=self._loc, ip=self._ip)
|
|
|
|
class StaticPersistentTileScheduler:
    """Persistent scheduler: a fixed grid of blocks strides over all tiles.

    The 1-D grid is capped at the SM count; each block starts at tile
    ``blockIdx.x`` and advances by ``gridDim.x`` until all
    num_block * num_head * num_batch tiles are consumed.
    """

    @dataclass
    class Params(ParamsBase):
        # Fast divmod by num_block: linear tile index -> (head*batch, block).
        num_block_divmod: FastDivmod
        # Fast divmod by num_head: head*batch index -> (batch, head).
        num_head_divmod: FastDivmod
        # Total tile count: num_block * num_head * num_batch.
        total_blocks: Int32

        @staticmethod
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "StaticPersistentTileScheduler.Params":
            """Derive Params (precomputed divmods) from the generic arguments."""
            total_blocks = args.num_block * args.num_head * args.num_batch
            return StaticPersistentTileScheduler.Params(
                FastDivmod.create(args.num_block), FastDivmod.create(args.num_head), total_blocks
            )

    def __init__(self, params: Params, tile_idx: Int32, *, loc=None, ip=None):
        self.params = params
        self._tile_idx = tile_idx
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return StaticPersistentTileScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "StaticPersistentTileScheduler":
        """Device-side factory: each block starts at its own linear index."""
        tile_idx = cute.arch.block_idx()[0]
        return StaticPersistentTileScheduler(params, tile_idx, loc=loc, ip=ip)

    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # Launch at most one persistent block per SM (fewer if there is less work).
        hardware_info = cutlass.utils.HardwareInfo()
        sm_count = hardware_info.get_device_multiprocessor_count()
        return (cutlass.min(sm_count, params.total_blocks), Int32(1), Int32(1))

    def get_current_work(self, *, loc=None, ip=None) -> cutlass.utils.WorkTileInfo:
        # Decompose the linear tile index with block fastest-varying:
        # tile_idx = ((batch * num_head) + head) * num_block + block.
        hn_idx, block_idx = self.params.num_block_divmod.divmod(self._tile_idx)
        batch_idx, head_idx = self.params.num_head_divmod.divmod(hn_idx)
        is_valid = self._tile_idx < self.params.total_blocks
        return cutlass.utils.WorkTileInfo(
            (Int32(block_idx), Int32(head_idx), Int32(batch_idx)), is_valid
        )

    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        # No lookahead state to prefetch for a static schedule.
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        # Grid-stride advance: step by the number of launched blocks.
        self._tile_idx += cute.arch.grid_dim()[0]

    def __extract_mlir_values__(self):
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        obj_list = []
        for obj, n_items in zip([self.params, self._tile_idx], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        # Fix: forward ip alongside loc — previously ip was silently dropped on
        # reconstruction even though __init__ accepts and stores it.
        return StaticPersistentTileScheduler(*(tuple(obj_list)), loc=self._loc, ip=self._ip)
|
|
|
|
class SingleTileLPTScheduler:
    """Non-persistent scheduler with an L2-friendly, LPT-style tile ordering.

    One block per tile, but the linear block index is swizzled so that
    consecutive blocks share the same group of (head, batch) K/V working sets
    while they are resident in L2, and the m-block order within a group is
    reversed (largest index first).
    """

    @dataclass
    class Params(ParamsBase):
        total_blocks: Int32  # num_block * num_head * num_batch
        num_block_divmod: FastDivmod  # divmod by num_block
        num_head_divmod: FastDivmod  # divmod by num_head
        l2_minor_divmod: FastDivmod  # divmod by swizzle (heads grouped in L2)
        l2_major_divmod: FastDivmod  # divmod by swizzle * num_block
        l2_minor_residual_divmod: FastDivmod  # divmod for the final partial group
        num_hb_quotient: Int32  # number of full swizzle groups in head*batch

        @staticmethod
        @cute.jit
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileLPTScheduler.Params":
            """Compute the L2-swizzle layout from the problem size."""
            # Bytes of K plus V for a single KV head.
            size_one_kv_head = args.seqlen_k * (args.headdim + args.headdim_v) * args.element_size
            size_one_head = size_one_kv_head
            # NOTE(review): assumes ~50 MB of usable L2 — confirm per target GPU.
            size_l2 = 50 * 1024 * 1024
            # swizzle = largest power of two count of heads whose K/V fit in L2
            # (at least 1 when even a single head overflows L2).
            log2_floor = lambda n: 31 - clz(n)
            swizzle = 1 if size_l2 < size_one_head else (1 << log2_floor(size_l2 // size_one_head))
            # head*batch splits into num_hb_quotient full groups of `swizzle`
            # entries plus one residual group of num_hb_remainder entries.
            num_hb_quotient = (args.num_head * args.num_batch) // swizzle
            num_hb_remainder = (args.num_head * args.num_batch) % swizzle
            return SingleTileLPTScheduler.Params(
                total_blocks=args.num_block * args.num_head * args.num_batch,
                num_block_divmod=FastDivmod.create(args.num_block),
                num_head_divmod=FastDivmod.create(args.num_head),
                l2_minor_divmod=FastDivmod.create(swizzle),
                l2_major_divmod=FastDivmod.create(swizzle * args.num_block),
                # max(..., 1) keeps the divisor valid even when there is no remainder.
                l2_minor_residual_divmod=FastDivmod.create(max(num_hb_remainder, 1)),
                num_hb_quotient=Int32(num_hb_quotient),
            )

    def __init__(self, params: Params, tile_idx: Int32, *, loc=None, ip=None):
        self.params = params
        self._tile_idx = tile_idx
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return SingleTileLPTScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    @cute.jit
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileLPTScheduler":
        """Device-side factory: bind the scheduler to this block's linear index."""
        tile_idx = cute.arch.block_idx()[0]
        return SingleTileLPTScheduler(params, tile_idx, loc=loc, ip=ip)

    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # 1-D launch: one block per tile; the swizzle happens in get_current_work.
        return (params.total_blocks, Int32(1), Int32(1))

    @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> cutlass.utils.WorkTileInfo:
        """De-swizzle the linear tile index into (block, head, batch)."""
        params = self.params
        # Major split: which L2 group of (swizzle * num_block) tiles, and the
        # offset inside that group.
        bidhb, l2_mod = params.l2_major_divmod.divmod(self._tile_idx)
        # Full groups hold `swizzle` heads; the final residual group holds fewer.
        block, bidhb_residual = 0, 0
        if bidhb < params.num_hb_quotient:
            block, bidhb_residual = params.l2_minor_divmod.divmod(l2_mod)
        else:
            block, bidhb_residual = params.l2_minor_residual_divmod.divmod(l2_mod)
        # Reassemble the flat head*batch index from group and residual.
        bidhb_actual = bidhb * params.l2_minor_divmod.divisor + bidhb_residual
        batch_idx, head_idx = params.num_head_divmod.divmod(bidhb_actual)
        # Reverse the m-block order within the group — the LPT ("longest
        # processing time first") part of the schedule.
        block = params.num_block_divmod.divisor - 1 - block
        is_valid = self._tile_idx < params.total_blocks
        return cutlass.utils.WorkTileInfo(
            (Int32(block), Int32(head_idx), Int32(batch_idx)), is_valid
        )

    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        # Nothing to prefetch: there is no next tile for this block.
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        # Single tile per block: park the index past the end so any further
        # get_current_work reports invalid.
        self._tile_idx = self.params.total_blocks

    def __extract_mlir_values__(self):
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        obj_list = []
        for obj, n_items in zip([self.params, self._tile_idx], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        # Fix: forward ip alongside loc — previously ip was silently dropped on
        # reconstruction even though __init__ accepts and stores it.
        return SingleTileLPTScheduler(*(tuple(obj_list)), loc=self._loc, ip=self._ip)
|
|
|
|
class SingleTileVarlenScheduler:
    """Non-persistent scheduler for variable-length (varlen) batches.

    One CUDA block handles one (m_block, head, batch) tile, but because each
    batch entry has its own sequence length, the mapping from the linear
    blockIdx.x to that tuple must be computed at runtime from the cumulative
    (mCuSeqlensQ) or used (mSeqUsedQ) sequence-length tensors, using
    warp-cooperative prefix sums and ballots.
    """

    @dataclass
    class Params(ParamsBase):
        num_head: Int32
        num_batch: Int32
        total_q: Int32  # total query tokens across the whole batch
        max_kvblock_in_l2: Int32  # how many K/V n-tiles fit in the assumed L2
        tile_shape_mn: cutlass.Constexpr[Tuple[int, int]]  # (tile_m, tile_n)
        mCuSeqlensQ: Optional[cute.Tensor] = None  # cumulative query seqlens per batch
        mSeqUsedQ: Optional[cute.Tensor] = None  # per-batch used query lengths
        qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1  # PackGQA folding factor
        lpt: cutlass.Constexpr[bool] = False  # enable L2-aware head/block ordering

        @staticmethod
        @cute.jit
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileVarlenScheduler.Params":
            """Derive Params from the generic arguments; requires varlen metadata."""
            # NOTE(review): assumes ~50 MB of usable L2 — confirm per target GPU.
            size_l2 = 50 * 1024 * 1024
            # Number of K/V n-tiles (tile_n wide, K plus V) that fit in L2.
            max_kvblock_in_l2 = size_l2 // ((args.headdim + args.headdim_v) * args.element_size * args.tile_shape_mn[1])
            assert args.mCuSeqlensQ is not None or args.mSeqUsedQ is not None, (
                "At least one of mCuSeqlensQ or mSeqUsedQ must be provided"
            )
            return SingleTileVarlenScheduler.Params(
                num_head=args.num_head,
                num_batch=args.num_batch,
                total_q=args.total_q,
                max_kvblock_in_l2=max_kvblock_in_l2,
                tile_shape_mn=args.tile_shape_mn,
                mCuSeqlensQ=args.mCuSeqlensQ,
                mSeqUsedQ=args.mSeqUsedQ,
                qhead_per_kvhead_packgqa=args.qhead_per_kvhead_packgqa,
                lpt=args.lpt,
            )

    def __init__(self, params: Params, tile_idx: Int32, *, loc=None, ip=None):
        self.params = params
        self._tile_idx = tile_idx
        # Each block owns exactly one tile; flips to False after it is consumed.
        self._is_first_block = True
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return SingleTileVarlenScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileVarlenScheduler":
        # Device-side factory: bind the scheduler to this block's linear index.
        tile_idx = cute.arch.block_idx()[0]
        return SingleTileVarlenScheduler(params, tile_idx, loc=loc, ip=ip)

    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # Upper bound on m-blocks: each batch entry can waste up to
        # tile_m - 1 slots of padding, hence the per-batch rounding term.
        total_blocks_max = (
            params.total_q + params.num_batch * (params.tile_shape_mn[0] - 1)
        ) // params.tile_shape_mn[0]
        return (total_blocks_max * params.num_head, Int32(1), Int32(1))

    @cute.jit
    def _get_num_m_blocks(self, lane: Int32, bidb_start: Int32) -> Int32:
        """Per-lane m-block count: lane i computes the count for batch bidb_start + i.

        Returns 0 for lanes past the batch end, and for the last lane of the
        warp (which only supplies the upper cu_seqlen boundary via shuffle-down).
        """
        params = self.params
        batch_idx = lane + bidb_start
        if cutlass.const_expr(params.mSeqUsedQ is not None):
            seqlen = Int32(0)
            if batch_idx < params.num_batch:
                seqlen = params.mSeqUsedQ[batch_idx]
        else:
            assert params.mCuSeqlensQ is not None
            cur_cu_seqlen = Int32(0)
            # <= so the lane holding the final boundary entry can still read it
            # (mCuSeqlensQ presumably has num_batch + 1 entries — TODO confirm).
            if batch_idx <= params.num_batch:
                cur_cu_seqlen = params.mCuSeqlensQ[batch_idx]
            # This batch's length = next boundary - this boundary.
            next_cu_seqlen = cute.arch.shuffle_sync_down(cur_cu_seqlen, offset=1)
            seqlen = next_cu_seqlen - cur_cu_seqlen
        if cutlass.const_expr(params.qhead_per_kvhead_packgqa > 1):
            # PackGQA: query heads are folded into the sequence dimension.
            seqlen *= params.qhead_per_kvhead_packgqa
        return (
            cute.ceil_div(seqlen, params.tile_shape_mn[0])
            if batch_idx < params.num_batch and lane < cute.arch.WARP_SIZE - 1
            else Int32(0)
        )

    @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> cutlass.utils.WorkTileInfo:
        """Map the linear tile index to (block, head, batch) for varlen input.

        The warp scans batches in groups of WARP_SIZE - 1: each lane holds one
        batch's m-block count, a warp prefix sum yields cumulative tile counts,
        and a ballot locates the batch containing this block's tile index.
        """
        params = self.params
        lane_idx = cute.arch.lane_idx()
        num_m_blocks = self._get_num_m_blocks(lane_idx, bidb_start=0)
        num_m_blocks_cumulative = utils.warp_prefix_sum(num_m_blocks, lane_idx)
        # Total m-blocks covered by this batch group (read from the last lane).
        m_blocks_in_group = cute.arch.shuffle_sync(num_m_blocks_cumulative, cute.arch.WARP_SIZE - 1)
        group_end_tile = m_blocks_in_group * params.num_head
        block, head_idx, batch_idx = Int32(0), Int32(0), Int32(0)
        next_tile_idx = self._tile_idx
        # Advance one batch group at a time until the group containing
        # next_tile_idx is found (or the batch is exhausted).
        while group_end_tile <= next_tile_idx:
            batch_idx += cute.arch.WARP_SIZE - 1
            if batch_idx >= params.num_batch:
                # Ran past the last batch: force loop exit; flagged invalid below.
                batch_idx = Int32(params.num_batch)
                group_end_tile = next_tile_idx + 1
            else:
                num_m_blocks = self._get_num_m_blocks(lane_idx, bidb_start=batch_idx)
                num_m_blocks_cumulative = utils.warp_prefix_sum(num_m_blocks, lane_idx)
                m_blocks_in_group = cute.arch.shuffle_sync(
                    num_m_blocks_cumulative, cute.arch.WARP_SIZE - 1
                )
                group_end_tile += m_blocks_in_group * params.num_head
        is_valid = False
        if batch_idx >= params.num_batch:
            block, head_idx, batch_idx = Int32(0), Int32(0), Int32(params.num_batch)
        else:
            group_start_tile = group_end_tile - m_blocks_in_group * params.num_head
            # Count lanes whose cumulative tile start is still <= the target
            # index: that count is the batch's offset within this group.
            batch_idx_in_group = cute.arch.popc(
                cute.arch.vote_ballot_sync(
                    group_start_tile + num_m_blocks_cumulative * params.num_head <= next_tile_idx
                )
            )
            batch_idx += batch_idx_in_group
            # Cumulative m-blocks of all earlier batches in the group.
            num_m_blocks_prev_lane = (
                0
                if batch_idx_in_group == 0
                else cute.arch.shuffle_sync(num_m_blocks_cumulative, batch_idx_in_group - 1)
            )
            num_m_blocks = cute.arch.shuffle_sync(num_m_blocks, batch_idx_in_group)
            # Remaining combined (head, m_block) offset inside the chosen batch.
            mh_block = next_tile_idx - group_start_tile - num_m_blocks_prev_lane * params.num_head
            if cutlass.const_expr(params.lpt):
                # L2-aware ordering: issue several heads together so their K/V
                # stays resident in L2, and reverse the m-block order.
                num_n_blocks = num_m_blocks * params.tile_shape_mn[0] // params.qhead_per_kvhead_packgqa // params.tile_shape_mn[1]
                # Largest power-of-two head count (capped at 16) whose combined
                # K/V n-tiles fit within max_kvblock_in_l2.
                nheads_in_l2 = 16 if num_n_blocks * 16 <= params.max_kvblock_in_l2 else (8 if num_n_blocks * 8 <= params.max_kvblock_in_l2 else (4 if num_n_blocks * 4 <= params.max_kvblock_in_l2 else (2 if num_n_blocks * 2 <= params.max_kvblock_in_l2 else 1)))
                nheads_in_l2 = min(nheads_in_l2, params.num_head)
                mh_in_l2 = nheads_in_l2 * num_m_blocks
                section_idx = mh_block // mh_in_l2
                l2_mod = mh_block - section_idx * mh_in_l2
                # The last section may hold fewer heads than nheads_in_l2.
                nheads_in_this_section = nheads_in_l2 if nheads_in_l2 * (section_idx + 1) <= params.num_head else params.num_head - section_idx * nheads_in_l2
                block = l2_mod // nheads_in_this_section
                head_idx_residual = l2_mod - block * nheads_in_this_section
                head_idx = section_idx * nheads_in_l2 + head_idx_residual
                # Reverse m-block order within the batch (largest index first).
                block = num_m_blocks - 1 - block
            else:
                head_idx = mh_block // num_m_blocks
                block = mh_block - head_idx * num_m_blocks
            is_valid = self._is_first_block and batch_idx < params.num_batch
        return cutlass.utils.WorkTileInfo(
            (Int32(block), Int32(head_idx), Int32(batch_idx)), is_valid
        )

    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        # Nothing to prefetch: there is no next tile for this block.
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        # Single tile per block: mark it consumed so further queries are invalid.
        self._is_first_block = False

    def __extract_mlir_values__(self):
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        obj_list = []
        for obj, n_items in zip([self.params, self._tile_idx], self._values_pos,):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return SingleTileVarlenScheduler(*(tuple(obj_list)), loc=self._loc)
|
|